Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bareudp.c16
-rw-r--r--drivers/net/bonding/bond_alb.c8
-rw-r--r--drivers/net/bonding/bond_main.c180
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c8
-rw-r--r--drivers/net/can/dev/dev.c12
-rw-r--r--drivers/net/can/dev/netlink.c74
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c4
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c10
-rw-r--r--drivers/net/can/kvaser_pciefd.c188
-rw-r--r--drivers/net/can/m_can/m_can.c8
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c6
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c280
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c4
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c2
-rw-r--r--drivers/net/can/slcan/slcan-core.c26
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c10
-rw-r--r--drivers/net/can/usb/esd_usb.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c4
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c6
-rw-r--r--drivers/net/can/usb/gs_usb.c8
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c6
-rw-r--r--drivers/net/can/xilinx_can.c16
-rw-r--r--drivers/net/dsa/b53/b53_common.c61
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/b53/b53_regs.h21
-rw-r--r--drivers/net/dsa/bcm_sf2.c1
-rw-r--r--drivers/net/dsa/dsa_loop.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c24
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h5
-rw-r--r--drivers/net/dsa/microchip/Kconfig1
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c194
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c274
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h44
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c30
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h7
-rw-r--r--drivers/net/dsa/mt7530-mmio.c1
-rw-r--r--drivers/net/dsa/mt7530.c270
-rw-r--r--drivers/net/dsa/mt7530.h60
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h16
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c11
-rw-r--r--drivers/net/dsa/ocelot/felix.c11
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c4
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c6
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c5
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c46
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h7
-rw-r--r--drivers/net/ethernet/airoha/Kconfig7
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c514
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h102
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c178
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h4
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c485
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c9
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h203
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c4
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c7
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h122
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c268
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c204
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-smn.h30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h128
-rw-r--r--drivers/net/ethernet/apple/bmac.c60
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c176
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.h78
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c36
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c13
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h3
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c250
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c277
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h32
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c23
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c25
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c18
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c3
-rw-r--r--drivers/net/ethernet/cortina/gemini.c37
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c14
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c30
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c41
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c41
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig12
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c123
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h51
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c90
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h20
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c369
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c50
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c78
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c107
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c93
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.h3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c12
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c462
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp_private.h104
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c53
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c9
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c3
-rw-r--r--drivers/net/ethernet/huawei/Kconfig1
-rw-r--r--drivers/net/ethernet/huawei/Makefile1
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Kconfig20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c25
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h53
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h113
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c24
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h81
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h58
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c62
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c414
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.h21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c354
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h15
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h13
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h105
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c78
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c233
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h41
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h82
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h120
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c68
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h54
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c341
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h90
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c670
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h135
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c29
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h76
-rw-r--r--drivers/net/ethernet/ibm/Kconfig13
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c358
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h65
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c75
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c7
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h67
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c81
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c82
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c266
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c54
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h21
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c14
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c67
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c85
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c9
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c873
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h362
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c25
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c189
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c161
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h84
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c615
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h314
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c78
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c20
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h55
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c81
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c90
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c247
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h52
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c557
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c290
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c1339
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c257
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c707
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c282
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h175
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c58
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h88
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c45
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c121
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h61
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c467
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c414
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1348
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c290
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c716
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c515
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c191
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/meta/Kconfig1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h6
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h34
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c258
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c178
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c311
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h56
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c335
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h48
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c47
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c10
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c42
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c62
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h5
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c63
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c33
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c48
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c99
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c138
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c7
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig1
-rw-r--r--drivers/net/ethernet/realtek/r8169.h7
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c434
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c205
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h15
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c66
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c11
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c146
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c88
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c295
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c89
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h62
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c174
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c157
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c374
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h64
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c30
-rw-r--r--drivers/net/ethernet/ti/cpsw.c26
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h6
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c24
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.h58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h33
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c80
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c357
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c188
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c176
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h77
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c30
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c909
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.h18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h115
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c94
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c5
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c385
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h15
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c38
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c23
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c12
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c64
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c206
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c47
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h118
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c61
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/gtp.c18
-rw-r--r--drivers/net/hamradio/baycom_epp.c5
-rw-r--r--drivers/net/hyperv/hyperv_net.h13
-rw-r--r--drivers/net/hyperv/netvsc.c57
-rw-r--r--drivers/net/hyperv/netvsc_drv.c68
-rw-r--r--drivers/net/hyperv/rndis_filter.c24
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c1
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c1
-rw-r--r--drivers/net/ipa/ipa_data.h2
-rw-r--r--drivers/net/ipa/ipa_mem.c21
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/macvlan.c20
-rw-r--r--drivers/net/mctp/mctp-usb.c2
-rw-r--r--drivers/net/mdio/Kconfig48
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c2
-rw-r--r--drivers/net/mdio/mdio-realtek-rtl9300.c522
-rw-r--r--drivers/net/mdio/mdio-thunder.c10
-rw-r--r--drivers/net/mdio/of_mdio.c2
-rw-r--r--drivers/net/netdevsim/ipsec.c15
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/ovpn/Makefile22
-rw-r--r--drivers/net/ovpn/bind.c55
-rw-r--r--drivers/net/ovpn/bind.h101
-rw-r--r--drivers/net/ovpn/crypto.c210
-rw-r--r--drivers/net/ovpn/crypto.h145
-rw-r--r--drivers/net/ovpn/crypto_aead.c389
-rw-r--r--drivers/net/ovpn/crypto_aead.h29
-rw-r--r--drivers/net/ovpn/io.c458
-rw-r--r--drivers/net/ovpn/io.h34
-rw-r--r--drivers/net/ovpn/main.c279
-rw-r--r--drivers/net/ovpn/main.h14
-rw-r--r--drivers/net/ovpn/netlink-gen.c213
-rw-r--r--drivers/net/ovpn/netlink-gen.h41
-rw-r--r--drivers/net/ovpn/netlink.c1258
-rw-r--r--drivers/net/ovpn/netlink.h18
-rw-r--r--drivers/net/ovpn/ovpnpriv.h55
-rw-r--r--drivers/net/ovpn/peer.c1364
-rw-r--r--drivers/net/ovpn/peer.h163
-rw-r--r--drivers/net/ovpn/pktid.c129
-rw-r--r--drivers/net/ovpn/pktid.h86
-rw-r--r--drivers/net/ovpn/proto.h118
-rw-r--r--drivers/net/ovpn/skb.h61
-rw-r--r--drivers/net/ovpn/socket.c233
-rw-r--r--drivers/net/ovpn/socket.h49
-rw-r--r--drivers/net/ovpn/stats.c21
-rw-r--r--drivers/net/ovpn/stats.h47
-rw-r--r--drivers/net/ovpn/tcp.c598
-rw-r--r--drivers/net/ovpn/tcp.h36
-rw-r--r--drivers/net/ovpn/udp.c449
-rw-r--r--drivers/net/ovpn/udp.h25
-rw-r--r--drivers/net/pfcp.c23
-rw-r--r--drivers/net/phy/Kconfig29
-rw-r--r--drivers/net/phy/Makefile22
-rw-r--r--drivers/net/phy/air_en8811h.c103
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c6
-rw-r--r--drivers/net/phy/as21xxx.c1087
-rw-r--r--drivers/net/phy/bcm87xx.c14
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/dp83822.c33
-rw-r--r--drivers/net/phy/dp83867.c76
-rw-r--r--drivers/net/phy/fixed_phy.c40
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c111
-rw-r--r--drivers/net/phy/marvell10g.c12
-rw-r--r--drivers/net/phy/mdio_bus.c476
-rw-r--r--drivers/net/phy/mdio_bus_provider.c484
-rw-r--r--drivers/net/phy/mdio_device.c1
-rw-r--r--drivers/net/phy/mediatek/Kconfig20
-rw-r--r--drivers/net/phy/mediatek/Makefile3
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c321
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c91
-rw-r--r--drivers/net/phy/micrel.c30
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c5
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c20
-rw-r--r--drivers/net/phy/mxl-86110.c616
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c54
-rw-r--r--drivers/net/phy/nxp-tja11xx.c6
-rw-r--r--drivers/net/phy/phy_device.c102
-rw-r--r--drivers/net/phy/phylink.c7
-rw-r--r--drivers/net/phy/realtek/realtek_main.c337
-rw-r--r--drivers/net/phy/teranetics.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c25
-rw-r--r--drivers/net/tap.c14
-rw-r--r--drivers/net/team/team_core.c8
-rw-r--r--drivers/net/tun.c8
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/aqc111.c10
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c22
-rw-r--r--drivers/net/usb/asix_devices.c17
-rw-r--r--drivers/net/usb/lan78xx.c471
-rw-r--r--drivers/net/usb/r8152.c98
-rw-r--r--drivers/net/veth.c57
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c9
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c560
-rw-r--r--drivers/net/vxlan/vxlan_private.h11
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c20
-rw-r--r--drivers/net/wireguard/allowedips.c102
-rw-r--r--drivers/net/wireguard/allowedips.h4
-rw-r--r--drivers/net/wireguard/cookie.c4
-rw-r--r--drivers/net/wireguard/netlink.c47
-rw-r--r--drivers/net/wireguard/noise.c4
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c32
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c302
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h16
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c52
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c50
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1155
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c103
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c329
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h169
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c497
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c154
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h53
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c1097
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c596
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c209
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c153
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c121
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h27
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c511
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h30
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1439
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h56
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c66
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c238
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c526
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c558
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h119
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c308
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h87
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c378
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c227
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c168
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c169
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c170
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h498
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h)59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c237
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h241
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c65
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c85
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c204
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c)200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2383
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c187
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c292
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c174
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h1
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c42
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c74
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c44
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c51
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c158
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c196
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c120
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mcu.h | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mmio.c | 195
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h | 82
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/pci.c | 21
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/regs.h | 51
-rw-r--r-- drivers/net/wireless/purelifi/plfxlc/usb.c | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pci.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/usb.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/usb.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/coex.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw88/hci.h | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac80211.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.c | 35
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw88/pci.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8703b.c | 61
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723cs.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723d.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723de.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723ds.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723du.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723x.c | 59
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8812a.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8812au.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814a.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814ae.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814au.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821a.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821au.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821ce.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821cs.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821cu.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822be.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822bs.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822bu.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822ce.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822cs.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822cu.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sdio.c | 27
-rw-r--r-- drivers/net/wireless/realtek/rtw88/usb.c | 63
-rw-r--r-- drivers/net/wireless/realtek/rtw89/acpi.c | 1037
-rw-r--r-- drivers/net/wireless/realtek/rtw89/acpi.h | 190
-rw-r--r-- drivers/net/wireless/realtek/rtw89/cam.c | 7
-rw-r--r-- drivers/net/wireless/realtek/rtw89/chan.c | 418
-rw-r--r-- drivers/net/wireless/realtek/rtw89/chan.h | 17
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.c | 493
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.h | 144
-rw-r--r-- drivers/net/wireless/realtek/rtw89/debug.c | 174
-rw-r--r-- drivers/net/wireless/realtek/rtw89/fw.c | 480
-rw-r--r-- drivers/net/wireless/realtek/rtw89/fw.h | 94
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.c | 58
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.h | 19
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac80211.c | 38
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac_be.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw89/pci.c | 36
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy.c | 131
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy.h | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy_be.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ps.c | 147
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ps.h | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw89/reg.h | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw89/regd.c | 46
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_common.c | 24
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 30
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922a.c | 32
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtw89/sar.c | 296
-rw-r--r-- drivers/net/wireless/realtek/rtw89/sar.h | 19
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ser.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw89/txrx.h | 31
-rw-r--r-- drivers/net/wireless/realtek/rtw89/wow.c | 3
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.c | 12
1040 files changed, 58933 insertions, 18698 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 271520510b5f..b29628d46be9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -115,6 +115,21 @@ config WIREGUARD_DEBUG
Say N here unless you know what you're doing.
+config OVPN
+ tristate "OpenVPN data channel offload"
+ depends on NET && INET
+ depends on IPV6 || !IPV6
+ select DST_CACHE
+ select NET_UDP_TUNNEL
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ select CRYPTO_CHACHA20POLY1305
+ select STREAM_PARSER
+ help
+ This module enhances the performance of the OpenVPN userspace software
+ by offloading the data channel processing to kernelspace.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
help
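The "depends on IPV6 || !IPV6" line above is the usual Kconfig idiom for an optional dependency: the only combination it rules out is OVPN=y with IPV6=m, so the module can never be built in while the IPv6 core it may call into is itself modular. NET_UDP_TUNNEL supplies the UDP encapsulation socket helpers such a data-channel module builds on; a minimal sketch of that generic pattern (not ovpn's actual code, all names here are illustrative):

#include <net/udp_tunnel.h>

/* Attach an encap receive hook to a UDP socket so the stack hands
 * tunnel payloads straight to the module.
 */
static int sketch_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* decrypt/decapsulate here; 0 = consumed, >0 = pass on to UDP */
	kfree_skb(skb);
	return 0;
}

static void sketch_attach_encap(struct net *net, struct socket *sock)
{
	struct udp_tunnel_sock_cfg cfg = {
		.encap_type = 1,	/* placeholder UDP_ENCAP_* value */
		.encap_rcv  = sketch_encap_rcv,
	};

	setup_udp_tunnel_sock(net, sock, &cfg);
}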
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 75333251a01a..73bc63ecd65f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_IPVTAP) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_WIREGUARD) += wireguard/
+obj-$(CONFIG_OVPN) += ovpn/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index d1473c5f8eef..a9dffdcac805 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -777,27 +777,19 @@ static __net_init int bareudp_init_net(struct net *net)
return 0;
}
-static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit bareudp_exit_rtnl_net(struct net *net,
+ struct list_head *dev_kill_list)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *bareudp, *next;
list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
- unregister_netdevice_queue(bareudp->dev, head);
-}
-
-static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_kill_list)
-{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, dev_kill_list);
+ bareudp_dellink(bareudp->dev, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch_rtnl = bareudp_exit_batch_rtnl,
+ .exit_rtnl = bareudp_exit_rtnl_net,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
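The conversion above moves to the newer pernet API: .exit_batch_rtnl handed each driver a list of dying namespaces and made it loop over them itself, while .exit_rtnl is invoked under RTNL once per namespace and the core still batches the final unregister. A minimal sketch of the resulting pattern ("foo" names are placeholders; the hook signature is the one this patch converts bareudp, and bonding below, to):

#include <linux/netdevice.h>
#include <net/netns/generic.h>

struct foo_dev {
	struct net_device *dev;
	struct list_head list;
};

struct foo_net {
	struct list_head dev_list;
};

static unsigned int foo_net_id;

static void __net_exit foo_exit_rtnl_net(struct net *net,
					 struct list_head *dev_kill_list)
{
	struct foo_net *fn = net_generic(net, foo_net_id);
	struct foo_dev *f, *next;

	/* Runs under RTNL once per dying netns; devices queued here are
	 * unregistered in one batch by the core afterwards.
	 */
	list_for_each_entry_safe(f, next, &fn->dev_list, list)
		unregister_netdevice_queue(f->dev, dev_kill_list);
}

static struct pernet_operations foo_net_ops = {
	.exit_rtnl = foo_exit_rtnl_net,
	.id	   = &foo_net_id,
	.size	   = sizeof(struct foo_net),
};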
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7edf0fd58c34..2d37b07c8215 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1035,7 +1035,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
*/
memcpy(ss.__data, addr, len);
ss.ss_family = dev->type;
- if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
+ if (dev_set_mac_address(dev, &ss, NULL)) {
slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
return -EOPNOTSUPP;
}
@@ -1273,8 +1273,7 @@ unwind:
break;
bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
rollback_slave->dev->addr_len);
- dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(rollback_slave->dev, &ss, NULL);
dev_addr_set(rollback_slave->dev, tmp_addr);
}
@@ -1763,8 +1762,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
bond->dev->addr_len);
ss.ss_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
- dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
- NULL);
+ dev_set_mac_address(new_slave->dev, &ss, NULL);
dev_addr_set(new_slave->dev, tmp_addr);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8ea183da8d53..c4d53e8e7c15 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -453,13 +453,14 @@ static struct net_device *bond_ipsec_dev(struct xfrm_state *xs)
/**
* bond_ipsec_add_sa - program device with a security association
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int bond_ipsec_add_sa(struct xfrm_state *xs,
+static int bond_ipsec_add_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
netdevice_tracker tracker;
struct bond_ipsec *ipsec;
@@ -495,9 +496,9 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
goto out;
}
- xs->xso.real_dev = real_dev;
- err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+ err = real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, xs, extack);
if (!err) {
+ xs->xso.real_dev = real_dev;
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
mutex_lock(&bond->ipsec_lock);
@@ -539,11 +540,25 @@ static void bond_ipsec_add_sa_all(struct bonding *bond)
if (ipsec->xs->xso.real_dev == real_dev)
continue;
- ipsec->xs->xso.real_dev = real_dev;
- if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
+ if (real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev,
+ ipsec->xs, NULL)) {
slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
- ipsec->xs->xso.real_dev = NULL;
+ continue;
}
+
+ spin_lock_bh(&ipsec->xs->lock);
+ /* xs might have been killed by the user during the migration
+ * to the new dev, but bond_ipsec_del_sa() should have done
+ * nothing, as xso.real_dev is NULL.
+ * Delete it from the device we just added it to. The pending
+ * bond_ipsec_free_sa() call will do the rest of the cleanup.
+ */
+ if (ipsec->xs->km.state == XFRM_STATE_DEAD &&
+ real_dev->xfrmdev_ops->xdo_dev_state_delete)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ ipsec->xs->xso.real_dev = real_dev;
+ spin_unlock_bh(&ipsec->xs->lock);
}
out:
mutex_unlock(&bond->ipsec_lock);
@@ -551,54 +566,27 @@ out:
/**
* bond_ipsec_del_sa - clear out this specific SA
+ * @bond_dev: pointer to the bond net device
* @xs: pointer to transformer state struct
**/
-static void bond_ipsec_del_sa(struct xfrm_state *xs)
+static void bond_ipsec_del_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- netdevice_tracker tracker;
- struct bond_ipsec *ipsec;
- struct bonding *bond;
- struct slave *slave;
- if (!bond_dev)
+ if (!bond_dev || !xs->xso.real_dev)
return;
- rcu_read_lock();
- bond = netdev_priv(bond_dev);
- slave = rcu_dereference(bond->curr_active_slave);
- real_dev = slave ? slave->dev : NULL;
- netdev_hold(real_dev, &tracker, GFP_ATOMIC);
- rcu_read_unlock();
-
- if (!slave)
- goto out;
-
- if (!xs->xso.real_dev)
- goto out;
-
- WARN_ON(xs->xso.real_dev != real_dev);
+ real_dev = xs->xso.real_dev;
if (!real_dev->xfrmdev_ops ||
!real_dev->xfrmdev_ops->xdo_dev_state_delete ||
netif_is_bond_master(real_dev)) {
slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
- goto out;
+ return;
}
- real_dev->xfrmdev_ops->xdo_dev_state_delete(xs);
-out:
- netdev_put(real_dev, &tracker);
- mutex_lock(&bond->ipsec_lock);
- list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- if (ipsec->xs == xs) {
- list_del(&ipsec->list);
- kfree(ipsec);
- break;
- }
- }
- mutex_unlock(&bond->ipsec_lock);
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, xs);
}
static void bond_ipsec_del_sa_all(struct bonding *bond)
@@ -624,46 +612,55 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_delete\n",
__func__);
- } else {
- real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
- if (real_dev->xfrmdev_ops->xdo_dev_state_free)
- real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
+ continue;
}
+
+ spin_lock_bh(&ipsec->xs->lock);
+ ipsec->xs->xso.real_dev = NULL;
+ /* Don't double delete states killed by the user. */
+ if (ipsec->xs->km.state != XFRM_STATE_DEAD)
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev,
+ ipsec->xs);
+ spin_unlock_bh(&ipsec->xs->lock);
+
+ if (real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev,
+ ipsec->xs);
}
mutex_unlock(&bond->ipsec_lock);
}
-static void bond_ipsec_free_sa(struct xfrm_state *xs)
+static void bond_ipsec_free_sa(struct net_device *bond_dev,
+ struct xfrm_state *xs)
{
- struct net_device *bond_dev = xs->xso.dev;
struct net_device *real_dev;
- netdevice_tracker tracker;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
- struct slave *slave;
if (!bond_dev)
return;
- rcu_read_lock();
bond = netdev_priv(bond_dev);
- slave = rcu_dereference(bond->curr_active_slave);
- real_dev = slave ? slave->dev : NULL;
- netdev_hold(real_dev, &tracker, GFP_ATOMIC);
- rcu_read_unlock();
-
- if (!slave)
- goto out;
+ mutex_lock(&bond->ipsec_lock);
if (!xs->xso.real_dev)
goto out;
- WARN_ON(xs->xso.real_dev != real_dev);
+ real_dev = xs->xso.real_dev;
- if (real_dev && real_dev->xfrmdev_ops &&
+ xs->xso.real_dev = NULL;
+ if (real_dev->xfrmdev_ops &&
real_dev->xfrmdev_ops->xdo_dev_state_free)
- real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, xs);
out:
- netdev_put(real_dev, &tracker);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (ipsec->xs == xs) {
+ list_del(&ipsec->list);
+ kfree(ipsec);
+ break;
+ }
+ }
+ mutex_unlock(&bond->ipsec_lock);
}
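Common to all the bond_ipsec_* changes above: the xdo_dev_state_* offload callbacks now take the offloading net_device explicitly instead of each driver fishing it out of xs->xso, which is what lets the bond forward real_dev directly and drop the curr_active_slave dereference and the netdev_hold/netdev_put dance. The callback shapes, as inferred from the call sites in this patch (a sketch only; the authoritative definition is struct xfrmdev_ops in include/linux/netdevice.h):

struct xfrmdev_ops_sketch {
	int  (*xdo_dev_state_add)(struct net_device *dev,
				  struct xfrm_state *xs,
				  struct netlink_ext_ack *extack);
	void (*xdo_dev_state_delete)(struct net_device *dev,
				     struct xfrm_state *xs);
	void (*xdo_dev_state_free)(struct net_device *dev,
				   struct xfrm_state *xs);
};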
/**
@@ -1115,8 +1112,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
ss.ss_family = bond->dev->type;
}
- rv = dev_set_mac_address(new_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(new_active->dev, &ss, NULL);
if (rv) {
slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
-rv);
@@ -1130,8 +1126,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
new_active->dev->addr_len);
ss.ss_family = old_active->dev->type;
- rv = dev_set_mac_address(old_active->dev,
- (struct sockaddr *)&ss, NULL);
+ rv = dev_set_mac_address(old_active->dev, &ss, NULL);
if (rv)
slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
-rv);
@@ -2118,15 +2113,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
* set the master's mac address to that of the first slave
*/
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
- ss.ss_family = slave_dev->type;
- res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
- extack);
- if (res) {
- slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
- goto err_restore_mtu;
- }
+ } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ /* Set slave to random address to avoid duplicate mac
+ * address in later fail over.
+ */
+ eth_random_addr(ss.__data);
+ } else {
+ goto skip_mac_set;
+ }
+
+ ss.ss_family = slave_dev->type;
+ res = dev_set_mac_address(slave_dev, &ss, extack);
+ if (res) {
+ slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
+ goto err_restore_mtu;
}
+skip_mac_set:
+
/* set no_addrconf flag before open to prevent IPv6 addrconf */
slave_dev->priv_flags |= IFF_NO_ADDRCONF;
@@ -2447,7 +2453,7 @@ err_restore_mac:
bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
new_slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
err_restore_mtu:
@@ -2641,7 +2647,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
+ dev_set_mac_address(slave_dev, &ss, NULL);
}
if (unregister) {
@@ -4928,8 +4934,7 @@ unwind:
if (rollback_slave == slave)
break;
- tmp_res = dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&tmp_ss, NULL);
+ tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_ss, NULL);
if (tmp_res) {
slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
__func__, tmp_res);
@@ -6563,7 +6568,7 @@ static int __net_init bond_net_init(struct net *net)
/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
* race condition in bond unloading") we need to remove sysfs files
- * before we remove our devices (done later in bond_net_exit_batch_rtnl())
+ * before we remove our devices (done later in bond_net_exit_rtnl())
*/
static void __net_exit bond_net_pre_exit(struct net *net)
{
@@ -6572,25 +6577,20 @@ static void __net_exit bond_net_pre_exit(struct net *net)
bond_destroy_sysfs(bn);
}
-static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_kill_list)
+static void __net_exit bond_net_exit_rtnl(struct net *net,
+ struct list_head *dev_kill_list)
{
- struct bond_net *bn;
- struct net *net;
+ struct bond_net *bn = net_generic(net, bond_net_id);
+ struct bonding *bond, *tmp_bond;
/* Kill off any bonds created after unregistering bond rtnl ops */
- list_for_each_entry(net, net_list, exit_list) {
- struct bonding *bond, *tmp_bond;
-
- bn = net_generic(net, bond_net_id);
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, dev_kill_list);
- }
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
}
/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
* only after all bonds are gone") bond_destroy_proc_dir() is called
- * after bond_net_exit_batch_rtnl() has completed.
+ * after bond_net_exit_rtnl() has completed.
*/
static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
@@ -6606,7 +6606,7 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
.pre_exit = bond_net_pre_exit,
- .exit_batch_rtnl = bond_net_exit_batch_rtnl,
+ .exit_rtnl = bond_net_exit_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index f65c1a1e05cc..bf6398772960 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -275,7 +275,7 @@ static int ctucan_set_bittiming(struct net_device *ndev)
static int ctucan_set_data_bittiming(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
/* Note that dbt may be modified here */
return ctucan_set_btr(ndev, dbt, false);
@@ -290,7 +290,7 @@ static int ctucan_set_data_bittiming(struct net_device *ndev)
static int ctucan_set_secondary_sample_point(struct net_device *ndev)
{
struct ctucan_priv *priv = netdev_priv(ndev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
int ssp_offset = 0;
u32 ssp_cfg = 0; /* No SSP by default */
@@ -1358,12 +1358,12 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
priv->ntxbufs = ntxbufs;
priv->dev = dev;
priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
- priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.fd.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
priv->can.do_set_mode = ctucan_do_set_mode;
/* Needed for timing adjustment to be performed as soon as possible */
priv->can.do_set_bittiming = ctucan_set_bittiming;
- priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = ctucan_set_data_bittiming;
priv->can.do_get_berr_counter = ctucan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 5ec3170b896a..ea8c807af4d8 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -404,8 +404,8 @@ int open_candev(struct net_device *dev)
/* For CAN FD the data bitrate has to be >= the arbitration bitrate */
if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
- (!priv->data_bittiming.bitrate ||
- priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
+ (!priv->fd.data_bittiming.bitrate ||
+ priv->fd.data_bittiming.bitrate < priv->bittiming.bitrate)) {
netdev_err(dev, "incorrect/missing data bit-timing\n");
return -EINVAL;
}
@@ -543,16 +543,16 @@ int register_candev(struct net_device *dev)
if (!priv->bitrate_const != !priv->bitrate_const_cnt)
return -EINVAL;
- if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ if (!priv->fd.data_bitrate_const != !priv->fd.data_bitrate_const_cnt)
return -EINVAL;
/* We only support either fixed bit rates or bit timing const. */
- if ((priv->bitrate_const || priv->data_bitrate_const) &&
- (priv->bittiming_const || priv->data_bittiming_const))
+ if ((priv->bitrate_const || priv->fd.data_bitrate_const) &&
+ (priv->bittiming_const || priv->fd.data_bittiming_const))
return -EINVAL;
if (!can_bittiming_const_valid(priv->bittiming_const) ||
- !can_bittiming_const_valid(priv->data_bittiming_const))
+ !can_bittiming_const_valid(priv->fd.data_bittiming_const))
return -EINVAL;
if (!priv->termination_const) {
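Throughout this series the CAN FD-specific members of struct can_priv move into an fd sub-struct, grouping the data-phase bittiming, TDC state, and FD callbacks while the classical-CAN fields stay put. The presumed shape of that grouping, reconstructed from the accesses in this diff (a sketch; the real definition lives in include/linux/can/dev.h and is not shown here):

struct can_fd_sketch {
	struct can_bittiming data_bittiming;
	const struct can_bittiming_const *data_bittiming_const;
	const u32 *data_bitrate_const;
	unsigned int data_bitrate_const_cnt;
	struct can_tdc tdc;
	const struct can_tdc_const *tdc_const;
	int (*do_set_data_bittiming)(struct net_device *dev);
	int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
};
/* embedded in struct can_priv as "fd", hence priv->fd.data_bittiming */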
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index f1db9b7ffd4d..a36842ace084 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -141,7 +141,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
int err;
if (!tdc_const || !can_tdc_is_enabled(priv))
@@ -179,7 +179,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
tdc.tdcf = tdcf;
}
- priv->tdc = tdc;
+ priv->fd.tdc = tdc;
return 0;
}
@@ -228,10 +228,10 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = CANFD_MTU;
} else {
dev->mtu = CAN_MTU;
- memset(&priv->data_bittiming, 0,
- sizeof(priv->data_bittiming));
+ memset(&priv->fd.data_bittiming, 0,
+ sizeof(priv->fd.data_bittiming));
priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
- memset(&priv->tdc, 0, sizeof(priv->tdc));
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
@@ -312,16 +312,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
- !priv->data_bitrate_const)
+ if (!priv->fd.data_bittiming_const && !priv->fd.do_set_data_bittiming &&
+ !priv->fd.data_bitrate_const)
return -EOPNOTSUPP;
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
sizeof(dbt));
err = can_get_bittiming(dev, &dbt,
- priv->data_bittiming_const,
- priv->data_bitrate_const,
- priv->data_bitrate_const_cnt,
+ priv->fd.data_bittiming_const,
+ priv->fd.data_bitrate_const,
+ priv->fd.data_bitrate_const_cnt,
extack);
if (err)
return err;
@@ -333,7 +333,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return -EINVAL;
}
- memset(&priv->tdc, 0, sizeof(priv->tdc));
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
if (data[IFLA_CAN_TDC]) {
/* TDC parameters are provided: use them */
err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
@@ -346,17 +346,17 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
- can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
+ can_calc_tdco(&priv->fd.tdc, priv->fd.tdc_const, &dbt,
&priv->ctrlmode, priv->ctrlmode_supported);
} /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
* turned off. TDC is disabled: do nothing
*/
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+ memcpy(&priv->fd.data_bittiming, &dbt, sizeof(dbt));
- if (priv->do_set_data_bittiming) {
+ if (priv->fd.do_set_data_bittiming) {
/* Finally, set the bit-timing registers */
- err = priv->do_set_data_bittiming(dev);
+ err = priv->fd.do_set_data_bittiming(dev);
if (err)
return err;
}
@@ -394,7 +394,7 @@ static size_t can_tdc_get_size(const struct net_device *dev)
struct can_priv *priv = netdev_priv(dev);
size_t size;
- if (!priv->tdc_const)
+ if (!priv->fd.tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
@@ -404,17 +404,17 @@ static size_t can_tdc_get_size(const struct net_device *dev)
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
- if (priv->tdc_const->tdcf_max) {
+ if (priv->fd.tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
if (can_tdc_is_enabled(priv)) {
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
- priv->do_get_auto_tdcv)
+ priv->fd.do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
- if (priv->tdc_const->tdcf_max)
+ if (priv->fd.tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
@@ -442,9 +442,9 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ if (priv->fd.data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ if (priv->fd.data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
@@ -454,9 +454,9 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
- if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt);
+ if (priv->fd.data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->fd.data_bitrate_const) *
+ priv->fd.data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
@@ -468,8 +468,8 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
- struct can_tdc *tdc = &priv->tdc;
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+ struct can_tdc *tdc = &priv->fd.tdc;
+ const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
if (!tdc_const)
return 0;
@@ -497,8 +497,8 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
tdcv = tdc->tdcv;
err = 0;
- } else if (priv->do_get_auto_tdcv) {
- err = priv->do_get_auto_tdcv(dev, &tdcv);
+ } else if (priv->fd.do_get_auto_tdcv) {
+ err = priv->fd.do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
@@ -564,14 +564,14 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
!priv->do_get_berr_counter(dev, &bec) &&
nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->data_bittiming.bitrate &&
+ (priv->fd.data_bittiming.bitrate &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+ sizeof(priv->fd.data_bittiming), &priv->fd.data_bittiming)) ||
- (priv->data_bittiming_const &&
+ (priv->fd.data_bittiming_const &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)) ||
+ sizeof(*priv->fd.data_bittiming_const),
+ priv->fd.data_bittiming_const)) ||
(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
@@ -586,11 +586,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
priv->bitrate_const_cnt,
priv->bitrate_const)) ||
- (priv->data_bitrate_const &&
+ (priv->fd.data_bitrate_const &&
nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->data_bitrate_const) *
- priv->data_bitrate_const_cnt,
- priv->data_bitrate_const)) ||
+ sizeof(*priv->fd.data_bitrate_const) *
+ priv->fd.data_bitrate_const_cnt,
+ priv->fd.data_bitrate_const)) ||
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 6d80c341b26f..06d5d35fc1b5 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -1226,7 +1226,7 @@ static void flexcan_set_bittiming_cbt(const struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_cbt, reg_fdctrl;
@@ -2239,7 +2239,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
priv->can.bittiming_const = &flexcan_fd_bittiming_const;
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&flexcan_fd_data_bittiming_const;
} else {
priv->can.bittiming_const = &flexcan_bittiming_const;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index c86b57d47085..2eeee65f606f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -669,7 +669,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
@@ -1000,10 +1000,10 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
- priv->can.bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.data_bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.do_set_mode = ifi_canfd_set_mode;
- priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
+ priv->can.bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.fd.data_bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.do_set_mode = ifi_canfd_set_mode;
+ priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
/* IFI CANFD can do both Bosch FD and ISO FD */
priv->can.ctrlmode = CAN_CTRLMODE_FD;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index cf0d51805272..7d3066691d5d 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <net/netdev_queues.h>
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@@ -410,10 +411,13 @@ struct kvaser_pciefd_can {
void __iomem *reg_base;
struct can_berr_counter bec;
u8 cmd_seq;
+ u8 tx_max_count;
+ u8 tx_idx;
+ u8 ack_idx;
int err_rep_cnt;
- int echo_idx;
+ unsigned int completed_tx_pkts;
+ unsigned int completed_tx_bytes;
spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
- spinlock_t echo_lock; /* Locks the message echo buffer */
struct timer_list bec_poll_timer;
struct completion start_comp, flush_comp;
};
@@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ can->tx_idx = 0;
+ can->ack_idx = 0;
+
ret = open_candev(netdev);
if (ret)
return ret;
@@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
timer_delete(&can->bec_poll_timer);
}
can->can.state = CAN_STATE_STOPPED;
+ netdev_reset_queue(netdev);
close_candev(netdev);
return ret;
}
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
+{
+ return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
+}
+
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
- struct kvaser_pciefd_can *can,
+ struct can_priv *can, u8 seq,
struct sk_buff *skb)
{
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
int packet_size;
- int seq = can->echo_idx;
memset(p, 0, sizeof(*p));
- if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
if (cf->can_id & CAN_RTR_FLAG)
@@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
} else {
p->header[1] |=
FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
- can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
+ can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
}
p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet;
+ unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
+ unsigned int frame_len;
int nr_words;
- u8 count;
if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
+ if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+ return NETDEV_TX_BUSY;
- nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+ nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
- spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx, 0);
-
- /* Move echo index to the next slot */
- can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+ WRITE_ONCE(can->can.echo_skb[seq], NULL);
+ frame_len = can_skb_get_frame_len(skb);
+ can_put_echo_skb(skb, netdev, seq, frame_len);
+ netdev_sent_queue(netdev, frame_len);
+ WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
/* Write header to fifo */
iowrite32(packet.header[0],
@@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
}
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
- /* No room for a new message, stop the queue until at least one
- * successful transmit
- */
- if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+ netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
return NETDEV_TX_OK;
}
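The transmit path above replaces the echo_lock/echo_idx scheme with a lockless pair of free-running u8 counters: tx_idx is written only by the xmit path and ack_idx only by the ACK handler; echo_skb_max is rounded up to a power of two so tx_idx & (echo_skb_max - 1) keeps addressing slots consistently across the 256-wrap, and availability is the counter distance. A self-contained sketch of that math, with the modulo-256 reduction written out explicitly:

#include <linux/types.h>

/* tx_idx and ack_idx wrap at 256; truncating their difference back to
 * u8 yields the in-flight count as long as it never exceeds 255.
 */
static unsigned int tx_avail_sketch(u8 tx_idx, u8 ack_idx, u8 tx_max_count)
{
	u8 in_flight = tx_idx - ack_idx;

	return tx_max_count - in_flight;
}
/* e.g. tx_idx = 2 (just wrapped), ack_idx = 250: in_flight = 8 */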
@@ -856,7 +863,7 @@ static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
struct can_bittiming *bt;
if (data)
- bt = &can->can.data_bittiming;
+ bt = &can->can.fd.data_bittiming;
else
bt = &can->can.bittiming;
@@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;
@@ -983,17 +992,16 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
tx_nr_packets_max =
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq;
- can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
- can->echo_idx = 0;
- spin_lock_init(&can->echo_lock);
+ can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
- can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
+ can->can.fd.data_bittiming_const = &kvaser_pciefd_bittiming_const;
can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
- can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
+ can->can.fd.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming;
can->can.do_set_mode = kvaser_pciefd_set_mode;
can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
@@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
cf->len = can_fd_dlc2len(dlc);
@@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
}
@@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
priv->dev->stats.rx_packets++;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- return netif_rx(skb);
+ netif_rx(skb);
+
+ return 0;
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
@@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
- int len;
- u8 count;
+ unsigned int len, frame_len = 0;
struct sk_buff *skb;
+ if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
+ return 0;
skb = can->can.echo_skb[echo_idx];
- if (skb)
- kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ if (!skb)
+ return 0;
+ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+ len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
- if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
- netif_wake_queue(can->can.dev);
+ /* Pairs with barrier in kvaser_pciefd_start_xmit() */
+ smp_store_release(&can->ack_idx, can->ack_idx + 1);
+ can->completed_tx_pkts++;
+ can->completed_tx_bytes += frame_len;
if (!one_shot_fail) {
can->can.dev->stats.tx_bytes += len;
@@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
int pos = 0;
int res = 0;
+ unsigned int i;
do {
res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+ /* Report ACKs in this buffer to BQL en masse for correct periods */
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
+
+ if (!can->completed_tx_pkts)
+ continue;
+ netif_subqueue_completed_wake(can->can.dev, 0,
+ can->completed_tx_pkts,
+ can->completed_tx_bytes,
+ kvaser_pciefd_tx_avail(can), 1);
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
+ }
+
return res;
}
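Rather than waking the queue per ACK, completions are accumulated in completed_tx_pkts/completed_tx_bytes and reported to BQL once per parsed DMA buffer: BQL's queue-limit estimator works on completion intervals, so one netif_subqueue_completed_wake() per interrupt gives it coherent samples and re-wakes the queue in the same call. A sketch of that flush step in isolation (function and parameter names here are ours, the helper comes from net/netdev_queues.h as included above):

/* Batch one BQL update per parsed DMA buffer instead of per ACK. */
static void flush_tx_completions(struct net_device *ndev,
				 unsigned int *pkts, unsigned int *bytes,
				 unsigned int tx_avail)
{
	if (!*pkts)
		return;
	/* One update per IRQ keeps BQL's rate estimation meaningful and
	 * re-wakes the queue if enough slots opened up.
	 */
	netif_subqueue_completed_wake(ndev, 0, *pkts, *bytes, tx_avail, 1);
	*pkts = 0;
	*bytes = 0;
}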
-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
+ void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
+ }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
+ }
if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
-
- iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
- u32 srb_irq = 0;
- u32 srb_release = 0;
int i;
if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
+ iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
if (pci_irq & irq_mask->kcan_rx0)
- srb_irq = kvaser_pciefd_receive_irq(pcie);
+ kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
-
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
-
- if (srb_release)
- iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
return IRQ_HANDLED;
}
@@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
}
}
+static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
+{
+ unsigned int i;
+
+ /* Masking PCI_IRQ is insufficient as running ISR will unmask it */
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+ for (i = 0; i < pcie->nr_channels; ++i)
+ iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
+
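The ordering matters here: the ISR masks the PCI-level enable on entry and rewrites it on exit, so clearing only KVASER_PCIEFD_PCI_IEN_ADDR from the remove path can race with a handler that re-enables it. Masking the SRB and per-channel KCAN sources instead cannot be undone by the ISR, and free_irq() then synchronizes with any still-running handler. A sketch of the teardown ordering this enables:

/* Teardown sketch: quiesce interrupt sources first, then free the
 * handler; free_irq() waits for a concurrently running ISR to finish.
 */
static void teardown_sketch(struct kvaser_pciefd *pcie)
{
	kvaser_pciefd_disable_irq_srcs(pcie);
	free_irq(pcie->pci->irq, pcie);
}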
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
- void __iomem *irq_en_base;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
/* Enable PCI interrupts */
- irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
- iowrite32(irq_mask->all, irq_en_base);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
@@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
- /* Disable PCI interrupts */
- iowrite32(0, irq_en_base);
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
err_pci_free_irq_vectors:
@@ -1844,35 +1875,26 @@ err_disable_pci:
return ret;
}
-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
-{
- int i;
-
- for (i = 0; i < pcie->nr_channels; i++) {
- struct kvaser_pciefd_can *can = pcie->can[i];
-
- if (can) {
- iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- unregister_candev(can->can.dev);
- timer_delete(&can->bec_poll_timer);
- kvaser_pciefd_pwm_stop(can);
- free_candev(can->can.dev);
- }
- }
-}
-
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+ unsigned int i;
- kvaser_pciefd_remove_all_ctrls(pcie);
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
- /* Disable interrupts */
- iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
- iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+ unregister_candev(can->can.dev);
+ timer_delete(&can->bec_poll_timer);
+ kvaser_pciefd_pwm_stop(can);
+ }
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
pci_free_irq_vectors(pcie->pci);
+
+ for (i = 0; i < pcie->nr_channels; ++i)
+ free_candev(pcie->can[i]->can.dev);
+
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index c2c116ce1087..6c656bfdb323 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1372,7 +1372,7 @@ static int m_can_set_bittiming(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
const struct can_bittiming *bt = &cdev->can.bittiming;
- const struct can_bittiming *dbt = &cdev->can.data_bittiming;
+ const struct can_bittiming *dbt = &cdev->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -1738,7 +1738,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_30X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
@@ -1746,13 +1746,13 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
if (err)
return err;
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.fd.data_bittiming_const = &m_can_data_bittiming_const_31X;
niso = m_can_niso_supported(cdev);
if (niso < 0)
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 28f3fd805273..77292afaed22 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -624,7 +624,7 @@ static int peak_canfd_set_data_bittiming(struct net_device *ndev)
{
struct peak_canfd_priv *priv = netdev_priv(ndev);
- return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
+ return pucan_set_timing_fast(priv, &priv->can.fd.data_bittiming);
}
static int peak_canfd_close(struct net_device *ndev)
@@ -813,12 +813,12 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
/* complete now socket-can initialization side */
priv->can.state = CAN_STATE_STOPPED;
priv->can.bittiming_const = &peak_canfd_nominal_const;
- priv->can.data_bittiming_const = &peak_canfd_data_const;
+ priv->can.fd.data_bittiming_const = &peak_canfd_data_const;
priv->can.do_set_mode = peak_canfd_set_mode;
priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
priv->can.do_set_bittiming = peak_canfd_set_bittiming;
- priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
+ priv->can.fd.do_set_data_bittiming = peak_canfd_set_data_bittiming;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES |
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index aa3df0d05b85..7f10213738e5 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,6 +21,7 @@
* wherever it is modified to a readable name.
*/
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/can/dev.h>
@@ -74,33 +75,24 @@
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
-#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
-#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch))
+#define RCANFD_GERFL_EEF GENMASK(23, 16)
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
#define RCANFD_GERFL_THLES BIT(2)
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
#define RCANFD_GERFL_ERR(gpriv, x) \
- ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \
- RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
- RCANFD_GERFL_MES | \
- ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ ((x) & ((FIELD_PREP(RCANFD_GERFL_EEF, (_gpriv)->channels_mask)) | \
+ RCANFD_GERFL_MES | ((_gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0))); \
+})
/* AFL Rx rules registers */
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
- (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \
- (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8)))
-
-#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
- (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \
- reg_gen4(gpriv, 0x1ff, 0xff))
-
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f))
+#define RCANFD_GAFLECTR_AFLPN(gpriv, page_num) ((page_num) & (gpriv)->info->max_aflpn)
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -118,13 +110,13 @@
/* RSCFDnCFDCmNCFG - CAN FD only */
#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24))
+ (((x) & ((gpriv)->info->nom_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->ntseg2)
#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16))
+ (((x) & ((gpriv)->info->nom_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->ntseg1)
#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11))
+ (((x) & ((gpriv)->info->nom_bittiming->sjw_max - 1)) << (gpriv)->info->sh->nsjw)
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
@@ -186,13 +178,13 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24)
+#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & ((gpriv)->info->data_bittiming->sjw_max - 1)) << 24)
#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20))
+ (((x) & ((gpriv)->info->data_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->dtseg2)
#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16))
+ (((x) & ((gpriv)->info->data_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->dtseg1)
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
@@ -233,11 +225,14 @@
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(gpriv, x) \
- (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
-#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16))
+#define RCANFD_CFCC_CFTML(gpriv, cftml) \
+({\
+ typeof(gpriv) (_gpriv) = (gpriv); \
+ (((cftml) & (_gpriv)->info->max_cftml) << (_gpriv)->info->sh->cftml); \
+})
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << (gpriv)->info->sh->cfm)
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8))
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << (gpriv)->info->sh->cfdc)
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -298,14 +293,14 @@
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
-#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2)))
+#define RCANFD_GAFLCFG(w) (0x009c + (0x04 * (w)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) ((gpriv)->info->regs->rfcc + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
@@ -315,13 +310,13 @@
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
#define RCANFD_CFCC(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfcc + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
#define RCANFD_CFSTS(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfsts + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
#define RCANFD_CFPCTR(gpriv, ch, idx) \
- (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
+ ((gpriv)->info->regs->cfpctr + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -437,7 +432,7 @@
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m)))
+#define RCANFD_F_DCFG(gpriv, m) ((gpriv)->info->regs->f_dcfg + (0x20 * (m)))
#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
@@ -453,7 +448,7 @@
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFOFFSET(gpriv) ((gpriv)->info->regs->rfoffset)
#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
@@ -461,7 +456,7 @@
(RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400)
+#define RCANFD_F_CFOFFSET(gpriv) ((gpriv)->info->regs->cfoffset)
#define RCANFD_F_CFID(gpriv, ch, idx) \
(RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
@@ -510,12 +505,43 @@
struct rcar_canfd_global;
+struct rcar_canfd_regs {
+ u16 rfcc; /* RX FIFO Configuration/Control Register */
+ u16 cfcc; /* Common FIFO Configuration/Control Register */
+ u16 cfsts; /* Common FIFO Status Register */
+ u16 cfpctr; /* Common FIFO Pointer Control Register */
+ u16 f_dcfg; /* Global FD Configuration Register */
+ u16 rfoffset; /* Receive FIFO buffer access ID register */
+ u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */
+};
+
+struct rcar_canfd_shift_data {
+ u8 ntseg2; /* Nominal Bit Rate Time Segment 2 Control */
+ u8 ntseg1; /* Nominal Bit Rate Time Segment 1 Control */
+ u8 nsjw; /* Nominal Bit Rate Resynchronization Jump Width Control */
+ u8 dtseg2; /* Data Bit Rate Time Segment 2 Control */
+ u8 dtseg1; /* Data Bit Rate Time Segment 1 Control */
+ u8 cftml; /* Common FIFO TX Message Buffer Link */
+ u8 cfm; /* Common FIFO Mode */
+ u8 cfdc; /* Common FIFO Depth Configuration */
+};
+
struct rcar_canfd_hw_info {
+ const struct can_bittiming_const *nom_bittiming;
+ const struct can_bittiming_const *data_bittiming;
+ const struct rcar_canfd_regs *regs;
+ const struct rcar_canfd_shift_data *sh;
+ u8 rnc_field_width;
+ u8 max_aflpn;
+ u8 max_cftml;
u8 max_channels;
u8 postdiv;
/* hardware features */
unsigned shared_global_irqs:1; /* Has shared global irqs */
unsigned multi_channel_irqs:1; /* Has multiple channel irqs */
+ unsigned ch_interface_mode:1; /* Has channel interface mode */
+ unsigned shared_can_regs:1; /* Has shared classical CAN registers */
+ unsigned external_clk:1; /* Has external clock */
};
/* Channel priv data */
@@ -548,7 +574,7 @@ struct rcar_canfd_global {
};
/* CAN FD mode nominal rate constants */
-static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_nom_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 128,
@@ -560,8 +586,20 @@ static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_nom_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 2,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
/* CAN FD mode data rate constants */
-static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
+static const struct can_bittiming_const rcar_canfd_gen3_data_bittiming_const = {
.name = RCANFD_DRV_NAME,
.tseg1_min = 2,
.tseg1_max = 16,
@@ -573,6 +611,18 @@ static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const rcar_canfd_gen4_data_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 32,
+ .tseg2_min = 2,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/* Classical CAN mode bitrate constants */
static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.name = RCANFD_DRV_NAME,
@@ -586,36 +636,113 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.brp_inc = 1,
};
+static const struct rcar_canfd_regs rcar_gen3_regs = {
+ .rfcc = 0x00b8,
+ .cfcc = 0x0118,
+ .cfsts = 0x0178,
+ .cfpctr = 0x01d8,
+ .f_dcfg = 0x0500,
+ .rfoffset = 0x3000,
+ .cfoffset = 0x3400,
+};
+
+static const struct rcar_canfd_regs rcar_gen4_regs = {
+ .rfcc = 0x00c0,
+ .cfcc = 0x0120,
+ .cfsts = 0x01e0,
+ .cfpctr = 0x0240,
+ .f_dcfg = 0x1400,
+ .rfoffset = 0x6000,
+ .cfoffset = 0x6400,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen3_shift_data = {
+ .ntseg2 = 24,
+ .ntseg1 = 16,
+ .nsjw = 11,
+ .dtseg2 = 20,
+ .dtseg1 = 16,
+ .cftml = 20,
+ .cfm = 16,
+ .cfdc = 8,
+};
+
+static const struct rcar_canfd_shift_data rcar_gen4_shift_data = {
+ .ntseg2 = 25,
+ .ntseg1 = 17,
+ .nsjw = 10,
+ .dtseg2 = 16,
+ .dtseg1 = 8,
+ .cftml = 16,
+ .cfm = 8,
+ .cfdc = 21,
+};
+
static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
.max_channels = 2,
.postdiv = 2,
.shared_global_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
};
static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 127,
+ .max_cftml = 31,
.max_channels = 8,
.postdiv = 2,
.shared_global_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 1,
};
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .regs = &rcar_gen3_regs,
+ .sh = &rcar_gen3_shift_data,
+ .rnc_field_width = 8,
+ .max_aflpn = 31,
+ .max_cftml = 15,
.max_channels = 2,
.postdiv = 1,
.multi_channel_irqs = 1,
+ .ch_interface_mode = 0,
+ .shared_can_regs = 0,
+ .external_clk = 1,
};
-/* Helper functions */
-static inline bool is_gen4(struct rcar_canfd_global *gpriv)
-{
- return gpriv->info == &rcar_gen4_hw_info;
-}
-
-static inline u32 reg_gen4(struct rcar_canfd_global *gpriv,
- u32 gen4, u32 not_gen4)
-{
- return is_gen4(gpriv) ? gen4 : not_gen4;
-}
+static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
+ .nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
+ .data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .regs = &rcar_gen4_regs,
+ .sh = &rcar_gen4_shift_data,
+ .rnc_field_width = 16,
+ .max_aflpn = 63,
+ .max_cftml = 31,
+ .max_channels = 6,
+ .postdiv = 1,
+ .multi_channel_irqs = 1,
+ .ch_interface_mode = 1,
+ .shared_can_regs = 1,
+ .external_clk = 0,
+};
+/* Helper functions */
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
u32 data = readl(reg);
@@ -681,9 +808,20 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
+static void rcar_canfd_setrnc(struct rcar_canfd_global *gpriv, unsigned int ch,
+ unsigned int num_rules)
+{
+ unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width;
+ unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width;
+ unsigned int w = ch / rnc_stride;
+ u32 rnc = num_rules << shift;
+
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc);
+}
+
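rcar_canfd_setrnc() replaces what was previously gen4-only macro logic: with 8-bit RNC fields four channels share one 32-bit GAFLCFG word, with 16-bit fields only two do, and the shift counts down from the top of the word. A standalone sketch of the index/shift arithmetic:

#include <stdio.h>

static void rnc_position(unsigned int ch, unsigned int field_width,
			 unsigned int *word, unsigned int *shift)
{
	unsigned int stride = 32 / field_width;	/* channels per register */

	*word = ch / stride;
	*shift = 32 - (ch % stride + 1) * field_width;
}

int main(void)
{
	unsigned int word, shift, ch;

	for (ch = 0; ch < 4; ch++) {
		rnc_position(ch, 8, &word, &shift);	/* gen3-style fields */
		printf("w=8  ch%u -> GAFLCFG%u, shift %2u\n", ch, word, shift);
		rnc_position(ch, 16, &word, &shift);	/* gen4-style fields */
		printf("w=16 ch%u -> GAFLCFG%u, shift %2u\n", ch, word, shift);
	}
	return 0;
}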
static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
{
- if (is_gen4(gpriv)) {
+ if (gpriv->info->ch_interface_mode) {
u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
: RCANFD_GEN4_FDCFG_CLOE;
@@ -789,7 +927,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
u32 ch, u32 rule_entry)
{
- int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ unsigned int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -800,9 +938,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
- RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
- if (is_gen4(gpriv))
+ rcar_canfd_setrnc(gpriv, ch, num_rules);
+ if (gpriv->info->shared_can_regs)
offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
@@ -942,7 +1079,7 @@ static void rcar_canfd_global_error(struct net_device *ndev)
u32 ridx = ch + RCANFD_RFFIFO_IDX;
gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if (gerfl & RCANFD_GERFL_EEF(ch)) {
+ if (gerfl & FIELD_PREP(RCANFD_GERFL_EEF, BIT(ch))) {
netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch);
stats->tx_dropped++;
}
@@ -1304,7 +1441,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
struct rcar_canfd_channel *priv = netdev_priv(dev);
struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 cfg;
u32 ch = priv->channel;
@@ -1338,7 +1475,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- if (is_gen4(gpriv)) {
+ if (gpriv->info->shared_can_regs) {
cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
RCANFD_NCFG_NBRP(brp) |
RCANFD_NCFG_NSJW(gpriv, sjw) |
@@ -1503,7 +1640,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
rcar_canfd_write(priv->base,
RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
@@ -1562,7 +1699,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
@@ -1613,7 +1750,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
- else if (is_gen4(gpriv))
+ else if (gpriv->info->shared_can_regs)
rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
@@ -1736,16 +1873,19 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
if (info->multi_channel_irqs) {
char *irq_name;
+ char name[10];
int err_irq;
int tx_irq;
- err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err");
+ scnprintf(name, sizeof(name), "ch%u_err", ch);
+ err_irq = platform_get_irq_byname(pdev, name);
if (err_irq < 0) {
err = err_irq;
goto fail;
}
- tx_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_trx" : "ch1_trx");
+ scnprintf(name, sizeof(name), "ch%u_trx", ch);
+ tx_irq = platform_get_irq_byname(pdev, name);
if (tx_irq < 0) {
err = tx_irq;
goto fail;
@@ -1782,9 +1922,8 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
}
if (gpriv->fdmode) {
- priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
- priv->can.data_bittiming_const =
- &rcar_canfd_data_bittiming_const;
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming;
/* Controller starts in CAN FD only mode */
err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
@@ -1846,6 +1985,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
u32 rule_entry = 0;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
+ struct clk *clk_ram;
int i;
info = of_device_get_match_data(dev);
@@ -1855,13 +1995,13 @@ static int rcar_canfd_probe(struct platform_device *pdev)
for (i = 0; i < info->max_channels; ++i) {
name[7] = '0' + i;
- of_child = of_get_child_by_name(dev->of_node, name);
- if (of_child && of_device_is_available(of_child)) {
+ of_child = of_get_available_child_by_name(dev->of_node, name);
+ if (of_child) {
channels_mask |= BIT(i);
transceivers[i] = devm_of_phy_optional_get(dev,
of_child, NULL);
+ of_node_put(of_child);
}
- of_node_put(of_child);
if (IS_ERR(transceivers[i]))
return PTR_ERR(transceivers[i]);
}
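of_get_available_child_by_name() folds the name lookup and the availability check into one call, which is why the of_node_put() can move inside the if-block: a reference is only held when a node was actually returned. Behaviourally it is roughly equivalent to this sketch in terms of the older primitives (not the kernel's actual implementation):

#include <linux/of.h>

/* Sketch of the helper's behaviour, for illustration only */
static struct device_node *
sketch_get_available_child_by_name(const struct device_node *node,
				   const char *name)
{
	struct device_node *child;

	child = of_get_child_by_name(node, name);
	if (child && !of_device_is_available(child)) {
		of_node_put(child);
		return NULL;
	}
	return child;
}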
@@ -1932,9 +2072,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv;
} else {
fcan_freq = clk_get_rate(gpriv->can_clk);
- gpriv->extclk = true;
+ gpriv->extclk = gpriv->info->external_clk;
}
+ clk_ram = devm_clk_get_optional_enabled(dev, "ram_clk");
+ if (IS_ERR(clk_ram))
+ return dev_err_probe(dev, PTR_ERR(clk_ram),
+ "cannot get enabled ram clock\n");
+
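devm_clk_get_optional_enabled() returns NULL, not an error, when the clock is simply absent from the device tree, so the probe path needs no flag to decide whether a RAM clock exists; the error branch only fires on genuine failures, and devm teardown disables the clock automatically. The same pattern for some other optional clock, where "aux_clk" is a made-up name for illustration:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int sketch_get_optional_clock(struct device *dev)
{
	struct clk *aux;

	aux = devm_clk_get_optional_enabled(dev, "aux_clk");
	if (IS_ERR(aux))
		return dev_err_probe(dev, PTR_ERR(aux),
				     "cannot get enabled aux clock\n");

	/* aux == NULL here simply means the DT omitted the clock. */
	return 0;
}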
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
err = PTR_ERR(addr);
@@ -2097,6 +2242,7 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
{ .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
+ { .compatible = "renesas,r9a09g047-canfd", .data = &r9a09g047_hw_info },
{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
{ .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index c3fb3176ce42..046f0a0ae4d4 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -118,7 +118,7 @@ static void rkcanfd_chip_set_work_mode(const struct rkcanfd_priv *priv)
static int rkcanfd_set_bittiming(struct rkcanfd_priv *priv)
{
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_nbt, reg_dbt, reg_tdc;
u32 tdco;
@@ -899,7 +899,7 @@ static int rkcanfd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->can.clock.freq = clk_get_rate(priv->clks[0].clk);
priv->can.bittiming_const = &rkcanfd_bittiming_const;
- priv->can.data_bittiming_const = &rkcanfd_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &rkcanfd_data_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
priv->can.do_set_mode = rkcanfd_set_mode;
diff --git a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
index 43d4b5721812..fa85a75be65a 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-timestamp.c
@@ -39,7 +39,7 @@ static void rkcanfd_timestamp_work(struct work_struct *work)
void rkcanfd_timestamp_init(struct rkcanfd_priv *priv)
{
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
const struct can_bittiming *bt = &priv->can.bittiming;
struct cyclecounter *cc = &priv->cc;
u32 bitrate, div, reg, rate;
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index 24c6622d36bd..58ff2ec1d975 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
#define SLCAN_CMD_LEN 1
#define SLCAN_SFF_ID_LEN 3
#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_DATA_LENGTH_LEN 1
+#define SLCAN_ERROR_LEN 1
#define SLCAN_STATE_LEN 1
#define SLCAN_STATE_BE_RXCNT_LEN 3
#define SLCAN_STATE_BE_TXCNT_LEN 3
-#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
- SLCAN_STATE_BE_RXCNT_LEN + \
- SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \
+ SLCAN_STATE_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_ERROR_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
+#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_SFF_ID_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
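The per-type minimum lengths let each parser reject a truncated message before it dereferences payload bytes; previously a single `rcount > 4` test in slcan_unesc() had to stand in for all message types (and is dropped further below). A standalone sketch of the resulting per-type checks, with the minimums computed from the macros above:

#include <stdio.h>
#include <string.h>

/* Minimum byte counts derived from the macro definitions above:
 * frame: cmd + 3-digit SFF ID + length digit        = 5
 * error: cmd + error byte + length digit            = 3
 * state: cmd + state byte + 3-digit RX + 3-digit TX = 8 (exact)
 */
static int slcan_msg_long_enough(const char *buf, size_t rcount)
{
	switch (buf[0]) {
	case 't': case 'T': case 'r': case 'R':
		return rcount >= 5;
	case 'e':
		return rcount >= 3;
	case 's':
		return rcount == 8;
	default:
		return 0;
	}
}

int main(void)
{
	const char *msgs[] = { "t1230", "e1a", "sb123456", "t1", "sF00" };

	for (size_t i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++)
		printf("%-8s -> %s\n", msgs[i],
		       slcan_msg_long_enough(msgs[i], strlen(msgs[i])) ?
		       "parse" : "drop");
	return 0;
}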
struct slcan {
struct can_priv can;
@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
+ if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
+ return;
+
skb = alloc_can_skb(sl->dev, &cf);
if (unlikely(!skb)) {
sl->dev->stats.rx_dropped++;
@@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl)
return;
}
- if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
+ if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
return;
cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
@@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl)
bool rx_errors = false, tx_errors = false, rx_over_errors = false;
int i, len;
+ if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
+ return;
+
/* get len from sanitized ASCII value */
len = cmd[1];
if (len >= '0' && len < '9')
@@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl)
static void slcan_unesc(struct slcan *sl, unsigned char s)
{
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount > 4)
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
slcan_bump(sl);
sl->rcount = 0;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index c30b04f8fc0d..7450ea42c1ea 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -527,7 +527,7 @@ static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u32 tdcmod, val = 0;
int err;
@@ -600,8 +600,8 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco);
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.fd.tdc.tdcv) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.fd.tdc.tdco);
return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
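Across this series, the FD-specific members of struct can_priv (data bittiming and its const, the TDC parameters, and the related callbacks) are regrouped under a nested `fd` member, turning `priv->can.data_bittiming` into `priv->can.fd.data_bittiming` everywhere. The accesses in these hunks imply roughly the following shape; this is a sketch reconstructed from the renames, not the authoritative definition:

#include <linux/can/bittiming.h>
#include <linux/can/dev.h>

/* Sketch of the regrouped layout implied by the renames in this series;
 * the real structure may carry further members.
 */
struct data_bittiming_params_sketch {
	struct can_bittiming data_bittiming;
	const struct can_bittiming_const *data_bittiming_const;
	struct can_tdc tdc;
	const struct can_tdc_const *tdc_const;
	int (*do_set_data_bittiming)(struct net_device *dev);
	int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
};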
@@ -2104,8 +2104,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
- priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
- priv->can.tdc_const = &mcp251xfd_tdc_const;
+ priv->can.fd.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.fd.tdc_const = &mcp251xfd_tdc_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 03ad10b01867..27a3818885c2 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -1098,7 +1098,7 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const;
struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *nom_bt = &priv->can.bittiming;
- struct can_bittiming *data_bt = &priv->can.data_bittiming;
+ struct can_bittiming *data_bt = &priv->can.fd.data_bittiming;
struct esd_usb_3_set_baudrate_msg_x *baud_x;
union esd_usb_msg *msg;
u16 flags = 0;
@@ -1218,9 +1218,9 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
- priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
+ priv->can.fd.data_bittiming_const = &esd_usb_3_data_bittiming_const;
priv->can.do_set_bittiming = esd_usb_3_set_bittiming;
- priv->can.do_set_data_bittiming = esd_usb_3_set_bittiming;
+ priv->can.fd.do_set_data_bittiming = esd_usb_3_set_bittiming;
break;
case ESD_USB_CANUSBM_PRODUCT_ID:
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 71f24dc0a927..db1acf6d504c 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -2059,8 +2059,8 @@ static int es58x_init_priv(struct es58x_device *es58x_dev,
can->bittiming_const = param->bittiming_const;
if (param->ctrlmode_supported & CAN_CTRLMODE_FD) {
- can->data_bittiming_const = param->data_bittiming_const;
- can->tdc_const = param->tdc_const;
+ can->fd.data_bittiming_const = param->data_bittiming_const;
+ can->fd.tdc_const = param->tdc_const;
}
can->bitrate_max = param->bitrate_max;
can->clock = param->clock;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index 84ffa1839bac..d924b053677b 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -427,12 +427,12 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
if (tx_conf_msg.canfd_enabled) {
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
- &priv->can.data_bittiming);
+ &priv->can.fd.data_bittiming);
if (can_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
- tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
- tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
+ tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco);
+ tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf);
}
conf_len = ES58X_FD_CANFD_CONF_LEN;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 3ccac6781b98..bb6335278e46 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -728,7 +728,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
static int gs_usb_set_data_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
struct gs_device_bittiming dbt = {
.prop_seg = cpu_to_le32(bt->prop_seg),
.phase_seg1 = cpu_to_le32(bt->phase_seg1),
@@ -1300,8 +1300,8 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* The data bit timing will be overwritten if
* GS_CAN_FEATURE_BT_CONST_EXT is set.
*/
- dev->can.data_bittiming_const = &dev->bt_const;
- dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = &dev->bt_const;
+ dev->can.fd.do_set_data_bittiming = gs_usb_set_data_bittiming;
}
if (feature & GS_CAN_FEATURE_TERMINATION) {
@@ -1381,7 +1381,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
- dev->can.data_bittiming_const = &dev->data_bt_const;
+ dev->can.fd.data_bittiming_const = &dev->data_bt_const;
}
can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT);
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 078496d9b7ba..f6c77eca9f43 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -137,7 +137,7 @@ struct kvaser_usb_net_priv {
* @dev_set_mode: used for can.do_set_mode
* @dev_set_bittiming: used for can.do_set_bittiming
* @dev_get_busparams: readback arbitration busparams
- * @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_set_data_bittiming: used for can.fd.do_set_data_bittiming
* @dev_get_data_busparams: readback data busparams
* @dev_get_berr_counter: used for can.do_get_berr_counter
*
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index dcb0bcbe0565..daf42080f942 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -592,7 +592,7 @@ static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
struct kvaser_usb_busparams busparams;
int tseg1 = dbt->prop_seg + dbt->phase_seg1;
int tseg2 = dbt->phase_seg2;
@@ -842,8 +842,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
- priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
+ priv->can.fd.data_bittiming_const = dev->cfg->data_bittiming_const;
+ priv->can.fd.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 59f7cd8ceb39..117637b9b995 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -770,7 +770,7 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
const struct peak_usb_adapter *pa = dev->adapter;
if (pa->dev_set_data_bittiming) {
- struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
int err = pa->dev_set_data_bittiming(dev, bt);
if (err)
@@ -954,8 +954,8 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev->can.clock = peak_usb_adapter->clock;
dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
- dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
- dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
+ dev->can.fd.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
+ dev->can.fd.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 436c0e4b0344..3f2e378199ab 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -481,7 +481,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
u32 btr0, btr1;
u32 is_config_mode;
@@ -517,10 +517,10 @@ static int xcan_set_bittiming(struct net_device *ndev)
btr0 = dbt->brp - 1;
if (can_tdc_is_enabled(&priv->can)) {
if (priv->devtype.cantype == XAXI_CANFD)
- btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
else
- btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
}
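FIELD_PREP() shifts a value into the field described by a mask, which is why only the mask macro differs between the two core revisions above. A userspace re-implementation of the idea for illustration; the real macro lives in <linux/bitfield.h> and adds compile-time checking, and the example masks below are assumptions rather than the actual XCAN TDCO masks:

#include <stdio.h>
#include <stdint.h>

/* Simplified FIELD_PREP: shift val up to the mask's lowest set bit. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val * (mask & -mask)) & mask;
}

int main(void)
{
	const uint32_t tdco_mask_a = 0x00001f00;	/* assumed example masks */
	const uint32_t tdco_mask_b = 0x01f00000;

	printf("0x%08x\n", field_prep(tdco_mask_a, 0x11));
	printf("0x%08x\n", field_prep(tdco_mask_b, 0x11));
	return 0;
}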
@@ -1967,22 +1967,22 @@ static int xcan_probe(struct platform_device *pdev)
goto err_free;
if (devtype->cantype == XAXI_CANFD) {
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
- priv->can.tdc_const = &xcan_tdc_const_canfd;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd;
}
if (devtype->cantype == XAXI_CANFD_2_0) {
- priv->can.data_bittiming_const =
+ priv->can.fd.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
- priv->can.tdc_const = &xcan_tdc_const_canfd2;
+ priv->can.fd.tdc_const = &xcan_tdc_const_canfd2;
}
if (devtype->cantype == XAXI_CANFD ||
devtype->cantype == XAXI_CANFD_2_0) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_TDC_AUTO;
- priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ priv->can.fd.do_get_auto_tdcv = xcan_get_auto_tdcv;
}
priv->reg_base = addr;
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 9eb39cfa5fb2..132683ed3abe 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/platform_data/b53.h>
#include <linux/phy.h>
@@ -326,6 +327,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
}
}
+static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
+{
+ u64 eap_conf;
+
+ if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
+ return;
+
+ b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
+
+ if (is63xx(dev)) {
+ eap_conf &= ~EAP_MODE_MASK_63XX;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
+ } else {
+ eap_conf &= ~EAP_MODE_MASK;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT;
+ }
+
+ b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
+}
+
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
u8 mgmt;
@@ -586,6 +607,13 @@ int b53_setup_port(struct dsa_switch *ds, int port)
b53_port_set_mcast_flood(dev, port, true);
b53_port_set_learning(dev, port, false);
+ /* Force all traffic to go to the CPU port to prevent the ASIC from
+ * trying to forward to bridged ports on matching FDB entries, then
+ * dropping frames because it isn't allowed to forward there.
+ */
+ if (dsa_is_user_port(ds, port))
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
return 0;
}
EXPORT_SYMBOL(b53_setup_port);
@@ -1175,6 +1203,10 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_vlan_aware_bridge_pvid = true;
+ /* Ageing time is set in seconds */
+ ds->ageing_time_min = 1 * 1000;
+ ds->ageing_time_max = AGE_TIME_MAX * 1000;
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
@@ -2042,6 +2074,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
pvlan |= BIT(i);
}
+ /* Disable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
+
/* Configure the local port VLAN control membership to include
* remote ports and update the local port bitmask
*/
@@ -2077,6 +2112,9 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
pvlan &= ~BIT(i);
}
+ /* Enable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
@@ -2373,6 +2411,28 @@ static int b53_get_max_mtu(struct dsa_switch *ds, int port)
return B53_MAX_MTU;
}
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct b53_device *dev = ds->priv;
+ u32 atc;
+ int reg;
+
+ if (is63xx(dev))
+ reg = B53_AGING_TIME_CONTROL_63XX;
+ else
+ reg = B53_AGING_TIME_CONTROL;
+
+ atc = DIV_ROUND_CLOSEST(msecs, 1000);
+
+ if (!is5325(dev) && !is5365(dev))
+ atc |= AGE_CHANGE;
+
+ b53_write32(dev, B53_MGMT_PAGE, reg, atc);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(b53_set_ageing_time);
+
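The switchdev core hands ageing time to drivers in milliseconds while the register takes seconds, hence the DIV_ROUND_CLOSEST(); range clamping against the ageing_time_min/max bounds registered in b53_setup() happens in the DSA core before this callback runs. A quick sketch of the rounding behaviour:

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_CLOSEST for positive values */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned int msecs[] = { 1000, 1499, 1500, 299999 };

	for (int i = 0; i < 4; i++)
		printf("%6u ms -> %3u s\n", msecs[i],
		       div_round_closest(msecs[i], 1000));
	return 0;
}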
static const struct phylink_mac_ops b53_phylink_mac_ops = {
.mac_select_pcs = b53_phylink_mac_select_pcs,
.mac_config = b53_phylink_mac_config,
@@ -2396,6 +2456,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_disable = b53_disable_port,
.support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 2cf3e6a81e37..a5ef7071ba07 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -343,6 +343,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
+int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index bfbcb66bef66..1fbc5a204bc7 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -50,6 +50,9 @@
/* Jumbo Frame Registers */
#define B53_JUMBO_PAGE 0x40
+/* EAP Registers */
+#define B53_EAP_PAGE 0x42
+
/* EEE Control Registers Page */
#define B53_EEE_PAGE 0x92
@@ -217,6 +220,13 @@
#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
#define BRCM_HDR_P7_EN BIT(2) /* Enable tagging on port 7 */
+/* Aging Time control register (32 bit) */
+#define B53_AGING_TIME_CONTROL 0x06
+#define B53_AGING_TIME_CONTROL_63XX 0x08
+#define AGE_CHANGE BIT(20)
+#define AGE_TIME_MASK 0x7ffff
+#define AGE_TIME_MAX 1048575
+
/* Mirror capture control register (16 bit) */
#define B53_MIR_CAP_CTL 0x10
#define CAP_PORT_MASK 0xf
@@ -481,6 +491,17 @@
#define JMS_MAX_SIZE 9724
/*************************************************************************
+ * EAP Page Registers
+ *************************************************************************/
+#define B53_PORT_EAP_CONF(i) (0x20 + 8 * (i))
+#define EAP_MODE_SHIFT 51
+#define EAP_MODE_SHIFT_63XX 50
+#define EAP_MODE_MASK (0x3ull << EAP_MODE_SHIFT)
+#define EAP_MODE_MASK_63XX (0x3ull << EAP_MODE_SHIFT_63XX)
+#define EAP_MODE_BASIC 0
+#define EAP_MODE_SIMPLIFIED 3
+
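Since the EAP configuration register is 64 bits wide and the mode field starts at bit 51 (bit 50 on 63xx), the mode value must be widened to 64 bits before shifting, exactly as the (u64) cast in b53_set_eap_mode() does. A standalone sketch of the field update using the non-63xx layout:

#include <stdio.h>
#include <stdint.h>

#define EAP_MODE_SHIFT		51
#define EAP_MODE_MASK		(0x3ull << EAP_MODE_SHIFT)
#define EAP_MODE_BASIC		0
#define EAP_MODE_SIMPLIFIED	3

static uint64_t eap_set_mode(uint64_t conf, uint64_t mode)
{
	conf &= ~EAP_MODE_MASK;
	conf |= mode << EAP_MODE_SHIFT;	/* shifting a plain int would overflow */
	return conf;
}

int main(void)
{
	uint64_t conf = eap_set_mode(0, EAP_MODE_SIMPLIFIED);

	printf("conf = 0x%016llx\n", (unsigned long long)conf);
	return conf != (0x3ull << 51);	/* sanity check */
}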
+/*************************************************************************
* EEE Configuration Page Registers
*************************************************************************/
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 454a8c7fd7ee..960685596093 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1235,6 +1235,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.port_disable = bcm_sf2_port_disable,
.support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
+ .set_ageing_time = b53_set_ageing_time,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_pre_bridge_flags = b53_br_flags_pre,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index adbab544c60f..d8a35f25a4c8 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -405,7 +405,7 @@ static int __init dsa_loop_init(void)
unsigned int i, ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
- phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
+ phydevs[i] = fixed_phy_register(&status, NULL);
ret = mdio_driver_register(&dsa_loop_drv);
if (ret)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9c2ed2ba79da..bebf0d3ff330 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -244,7 +244,7 @@ struct hellcreek_port_hwtstamp {
struct sk_buff *tx_skb;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
struct hellcreek_port {
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index ca2500aba96f..99941ff1ebf9 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -40,7 +40,7 @@ int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
* the user requested what is actually available or not
*/
static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek_port_hwtstamp *ps =
&hellcreek->ports[port].port_hwtstamp;
@@ -110,41 +110,35 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
}
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config config;
int err;
ps = &hellcreek->ports[port].port_hwtstamp;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct hellcreek *hellcreek = ds->priv;
struct hellcreek_port_hwtstamp *ps;
- struct hwtstamp_config *config;
ps = &hellcreek->ports[port].port_hwtstamp;
- config = &ps->tstamp_config;
+ *config = ps->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
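This conversion follows the same pattern as the other timestamping drivers in the series: the ioctl plumbing and copy_{from,to}_user() calls move into the core, and the driver callbacks operate directly on a kernel-space struct kernel_hwtstamp_config plus an extack for error reporting. A skeletal driver-side pair under those signatures; the ops and the private struct are hypothetical, not a specific driver:

#include <linux/errno.h>
#include <linux/net_tstamp.h>
#include <linux/netlink.h>

/* Hypothetical per-port state, for illustration */
struct sketch_port {
	struct kernel_hwtstamp_config tstamp_config;
};

static int sketch_hwtstamp_set(struct sketch_port *p,
			       struct kernel_hwtstamp_config *config,
			       struct netlink_ext_ack *extack)
{
	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG(extack, "unsupported tx_type");
		return -ERANGE;
	}

	p->tstamp_config = *config;	/* struct copy, no copy_to_user() */
	return 0;
}

static int sketch_hwtstamp_get(struct sketch_port *p,
			       struct kernel_hwtstamp_config *config)
{
	*config = p->tstamp_config;
	return 0;
}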
/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
index 7d88da2134f2..388821c4aa10 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -38,9 +38,10 @@
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config);
bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 12a86585a77f..c71d3fd5dfeb 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -6,6 +6,7 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
select NET_DSA_TAG_NONE
select NET_IEEE8021Q_HELPERS
select DCB
+ select PCS_XPCS
help
This driver adds support for Microchip KSZ8, KSZ9 and
LAN937X series switch chips, being KSZ8863/8873,
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 29fe79ea74cd..d747ea1c41a7 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@@ -161,6 +161,190 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
10, 1000);
}
+static void port_sgmii_s(struct ksz_device *dev, uint port, u16 devid, u16 reg)
+{
+ u32 data;
+
+ data = (devid & MII_MMD_CTRL_DEVAD_MASK) << 16;
+ data |= reg;
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_ADDR__4, data);
+}
+
+static void port_sgmii_r(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 *buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pread16(dev, port, REG_PORT_SGMII_DATA__4 + 2, buf);
+}
+
+static void port_sgmii_w(struct ksz_device *dev, uint port, u16 devid, u16 reg,
+ u16 buf)
+{
+ port_sgmii_s(dev, port, devid, reg);
+ ksz_pwrite32(dev, port, REG_PORT_SGMII_DATA__4, buf);
+}
+
+static int ksz9477_pcs_read(struct mii_bus *bus, int phy, int mmd, int reg)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+ u16 val;
+
+ port_sgmii_r(dev, port, mmd, reg, &val);
+
+ /* Simulate an ID value so that the XPCS driver activates its
+ * device-specific handling, where supported.
+ */
+ if (mmd == MDIO_MMD_PMAPMD) {
+ if (reg == MDIO_DEVID1)
+ val = 0x9477;
+ else if (reg == MDIO_DEVID2)
+ val = 0x22 << 10;
+ } else if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+ /* The MII_BMCR register needs to be updated with the exact speed
+ * and duplex mode when running in SGMII mode, as this register is
+ * used to detect the connected speed in that mode.
+ */
+ if (reg == MMD_SR_MII_AUTO_NEG_STATUS) {
+ int duplex, speed;
+
+ if (val & SR_MII_STAT_LINK_UP) {
+ speed = (val >> SR_MII_STAT_S) & SR_MII_STAT_M;
+ if (speed == SR_MII_STAT_1000_MBPS)
+ speed = SPEED_1000;
+ else if (speed == SR_MII_STAT_100_MBPS)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
+ if (val & SR_MII_STAT_FULL_DUPLEX)
+ duplex = DUPLEX_FULL;
+ else
+ duplex = DUPLEX_HALF;
+
+ if (!p->phydev.link ||
+ p->phydev.speed != speed ||
+ p->phydev.duplex != duplex) {
+ u16 ctrl;
+
+ p->phydev.link = 1;
+ p->phydev.speed = speed;
+ p->phydev.duplex = duplex;
+ port_sgmii_r(dev, port, mmd, MII_BMCR,
+ &ctrl);
+ ctrl &= BMCR_ANENABLE;
+ ctrl |= mii_bmcr_encode_fixed(speed,
+ duplex);
+ port_sgmii_w(dev, port, mmd, MII_BMCR,
+ ctrl);
+ }
+ } else {
+ p->phydev.link = 0;
+ }
+ } else if (reg == MII_BMSR) {
+ p->phydev.link = (val & BMSR_LSTATUS);
+ }
+ }
+
+ return val;
+}
+
+static int ksz9477_pcs_write(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+ int port = ksz_get_sgmii_port(dev);
+
+ if (mmd == MDIO_MMD_VEND2) {
+ struct ksz_port *p = &dev->ports[port];
+
+ if (reg == MMD_SR_MII_AUTO_NEG_CTRL) {
+ u16 sgmii_mode = SR_MII_PCS_SGMII << SR_MII_PCS_MODE_S;
+
+ /* Need these bits for 1000BASE-X mode to work with
+ * AN on.
+ */
+ if (!(val & sgmii_mode))
+ val |= SR_MII_SGMII_LINK_UP |
+ SR_MII_TX_CFG_PHY_MASTER;
+
+ /* SGMII interrupt in the port cannot be masked, so
+ * make sure interrupt is not enabled as it is not
+ * handled.
+ */
+ val &= ~SR_MII_AUTO_NEG_COMPLETE_INTR;
+ } else if (reg == MII_BMCR) {
+ /* The MII_ADVERTISE register needs to be written once
+ * before auto-negotiation is run so that the correct
+ * config_word is sent out after reset.
+ */
+ if ((val & BMCR_ANENABLE) && !p->sgmii_adv_write) {
+ u16 adv;
+
+ /* The SGMII port cannot disable flow control
+ * so it is better to just advertise symmetric
+ * pause.
+ */
+ port_sgmii_r(dev, port, mmd, MII_ADVERTISE,
+ &adv);
+ adv |= ADVERTISE_1000XPAUSE;
+ adv &= ~ADVERTISE_1000XPSE_ASYM;
+ port_sgmii_w(dev, port, mmd, MII_ADVERTISE,
+ adv);
+ p->sgmii_adv_write = 1;
+ } else if (val & BMCR_RESET) {
+ p->sgmii_adv_write = 0;
+ }
+ } else if (reg == MII_ADVERTISE) {
+ /* XPCS driver writes to this register so there is no
+ * need to update it for the errata.
+ */
+ p->sgmii_adv_write = 1;
+ }
+ }
+ port_sgmii_w(dev, port, mmd, reg, val);
+
+ return 0;
+}
+
+int ksz9477_pcs_create(struct ksz_device *dev)
+{
+ /* This chip has an SGMII port. */
+ if (ksz_has_sgmii_port(dev)) {
+ int port = ksz_get_sgmii_port(dev);
+ struct ksz_port *p = &dev->ports[port];
+ struct phylink_pcs *pcs;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(dev->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "ksz_pcs_mdio_bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(dev->dev));
+ bus->read_c45 = &ksz9477_pcs_read;
+ bus->write_c45 = &ksz9477_pcs_write;
+ bus->parent = dev->dev;
+ bus->phy_mask = ~0;
+ bus->priv = dev;
+
+ ret = devm_mdiobus_register(dev->dev, bus);
+ if (ret)
+ return ret;
+
+ pcs = xpcs_create_pcs_mdiodev(bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+ p->pcs = pcs;
+ }
+
+ return 0;
+}
+
int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
@@ -978,6 +1162,14 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
if (dev->info->gbit_capable[port])
config->mac_capabilities |= MAC_1000FD;
+
+ if (ksz_is_sgmii_port(dev, port)) {
+ struct ksz_port *p = &dev->ports[port];
+
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
+ p->pcs->supported_interfaces);
+ }
}
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index d2166b0d881e..0d1a6dfda23e 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 series Header file
*
- * Copyright (C) 2017-2022 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_H
@@ -97,4 +97,6 @@ void ksz9477_acl_match_process_l2(struct ksz_device *dev, int port,
u16 ethtype, u8 *src_mac, u8 *dst_mac,
unsigned long cookie, u32 prio);
+int ksz9477_pcs_create(struct ksz_device *dev);
+
#endif
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 89f0796894af..7c142c17b3f6 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,7 +2,7 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#include <linux/delay.h>
@@ -265,16 +265,70 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface);
+/**
+ * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ *
+ * This function is a dummy handler. See ksz_phylink_mac_enable_tx_lpi() for
+ * a detailed explanation of EEE/LPI handling in KSZ switches.
+ */
+static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+}
+
+/**
+ * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ * @timer: timer value before entering LPI (unused)
+ * @tx_clock_stop: whether to stop the TX clock in LPI mode (unused)
+ *
+ * This function signals to phylink that the driver architecture supports
+ * LPI management, enabling phylink to control EEE advertisement during
+ * negotiation according to IEEE Std 802.3 (Clause 78).
+ *
+ * Hardware Management of EEE/LPI State:
+ * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
+ * observation and testing suggest that the actual EEE / Low Power Idle (LPI)
+ * state transitions are managed autonomously by the hardware based on
+ * the auto-negotiation results. (Note: While the datasheet describes EEE
+ * operation based on negotiation, it doesn't explicitly detail the internal
+ * MAC/PHY interaction, so autonomous hardware management of the MAC state
+ * for LPI is inferred from observed behavior).
+ * This hardware control, consistent with the switch's ability to operate
+ * autonomously via strapping, means MAC-level software intervention is not
+ * required or exposed for managing the LPI state once EEE is negotiated.
+ * (Ref: KSZ9893R Data Sheet DS00002420D, primarily Section 4.7.5 explaining
+ * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
+ * Straps).
+ *
+ * Additionally, ports configured as MAC interfaces (e.g., KSZ9893R port 3)
+ * lack documented MAC-level LPI control.
+ *
+ * Therefore, this callback performs no action and serves primarily to inform
+ * phylink of LPI awareness and to document the inferred hardware behavior.
+ *
+ * Returns: 0 (Always success)
+ */
+static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ return 0;
+}
+
static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
.mac_config = ksz88x3_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops ksz88xx_dev_ops = {
@@ -354,10 +408,29 @@ static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
int speed, int duplex, bool tx_pause,
bool rx_pause);
+static struct phylink_pcs *
+ksz_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ struct ksz_port *p = &dev->ports[dp->index];
+
+ if (ksz_is_sgmii_port(dev, dp->index) &&
+ (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX))
+ return p->pcs;
+
+ return NULL;
+}
+
static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
+ .mac_select_pcs = ksz_phylink_mac_select_pcs,
};
static const struct ksz_dev_ops ksz9477_dev_ops = {
@@ -395,12 +468,15 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.reset = ksz9477_reset_switch,
.init = ksz9477_switch_init,
.exit = ksz9477_switch_exit,
+ .pcs_create = ksz9477_pcs_create,
};
static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops lan937x_dev_ops = {
@@ -1035,8 +1111,7 @@ static const struct regmap_range ksz9477_valid_regs[] = {
regmap_reg_range(0x701b, 0x701b),
regmap_reg_range(0x701f, 0x7020),
regmap_reg_range(0x7030, 0x7030),
- regmap_reg_range(0x7200, 0x7203),
- regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7200, 0x7207),
regmap_reg_range(0x7300, 0x7301),
regmap_reg_range(0x7400, 0x7401),
regmap_reg_range(0x7403, 0x7403),
@@ -1552,6 +1627,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
.ptp_capable = true,
+ .sgmii_port = 7,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -1944,6 +2020,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .sgmii_port = 7,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -2016,6 +2093,18 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
+
+ if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) {
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+
+ config->lpi_capabilities = MAC_100FD;
+ if (dev->info->gbit_capable[port])
+ config->lpi_capabilities |= MAC_1000FD;
+
+ /* EEE is fully operational */
+ config->eee_enabled_default = true;
+ }
}
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
@@ -2067,7 +2156,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
spin_unlock(&mib->stats64_lock);
- if (dev->info->phy_errata_9477) {
+ if (dev->info->phy_errata_9477 && !ksz_is_sgmii_port(dev, port)) {
ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
if (ret)
dev_err(dev->dev, "Failed to monitor transmission halt\n");
@@ -2697,8 +2786,9 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
kirq->dev = dev;
kirq->masked = ~0;
- kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
- &ksz_irq_domain_ops, kirq);
+ kirq->domain = irq_domain_create_simple(of_fwnode_handle(dev->dev->of_node),
+ kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
if (!kirq->domain)
return -ENOMEM;
@@ -2775,6 +2865,12 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret)
return ret;
+ if (ksz_has_sgmii_port(dev) && dev->dev_ops->pcs_create) {
+ ret = dev->dev_ops->pcs_create(dev);
+ if (ret)
+ return ret;
+ }
+
/* set broadcast storm protection 10% rate */
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
BROADCAST_STORM_RATE,
@@ -3008,31 +3104,6 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
if (!port)
return MICREL_KSZ8_P1_ERRATA;
break;
- case KSZ8567_CHIP_ID:
- /* KSZ8567R Errata DS80000752C Module 4 */
- case KSZ8765_CHIP_ID:
- case KSZ8794_CHIP_ID:
- case KSZ8795_CHIP_ID:
- /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
- case KSZ9477_CHIP_ID:
- /* KSZ9477S Errata DS80000754A Module 4 */
- case KSZ9567_CHIP_ID:
- /* KSZ9567S Errata DS80000756A Module 4 */
- case KSZ9896_CHIP_ID:
- /* KSZ9896C Errata DS80000757A Module 3 */
- case KSZ9897_CHIP_ID:
- case LAN9646_CHIP_ID:
- /* KSZ9897R Errata DS80000758C Module 4 */
- /* Energy Efficient Ethernet (EEE) feature select must be manually disabled
- * The EEE feature is enabled by default, but it is not fully
- * operational. It must be manually disabled through register
- * controls. If not disabled, the PHY ports can auto-negotiate
- * to enable EEE, and this feature can cause link drops when
- * linked to another device supporting EEE.
- *
- * The same item appears in the errata for all switches above.
- */
- return MICREL_NO_EEE;
}
return 0;
@@ -3466,6 +3537,20 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
return -EOPNOTSUPP;
}
+/**
+ * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
+ * port
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number to check
+ *
+ * This function also documents devices where EEE was initially advertised but
+ * later withdrawn due to reliability issues, as described in official errata
+ * documents. These devices are explicitly listed to record known limitations,
+ * even if there is no technical necessity for runtime checks.
+ *
+ * Returns: true if the internal PHY on the given port supports fully
+ * operational EEE, false otherwise.
+ */
static bool ksz_support_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
@@ -3475,15 +3560,35 @@ static bool ksz_support_eee(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ return true;
case KSZ8567_CHIP_ID:
+ /* KSZ8567R Errata DS80000752C Module 4 */
+ case KSZ8765_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8795_CHIP_ID:
+ /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
case KSZ9477_CHIP_ID:
- case KSZ9563_CHIP_ID:
+ /* KSZ9477S Errata DS80000754A Module 4 */
case KSZ9567_CHIP_ID:
- case KSZ9893_CHIP_ID:
+ /* KSZ9567S Errata DS80000756A Module 4 */
case KSZ9896_CHIP_ID:
+ /* KSZ9896C Errata DS80000757A Module 3 */
case KSZ9897_CHIP_ID:
case LAN9646_CHIP_ID:
- return true;
+ /* KSZ9897R Errata DS80000758C Module 4 */
+ /* Energy Efficient Ethernet (EEE) feature select must be
+ * manually disabled.
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ *
+ * The same item appears in the errata for all switches above.
+ */
+ break;
}
return false;
@@ -3613,6 +3718,10 @@ static void ksz_phylink_mac_config(struct phylink_config *config,
if (dev->info->internal_phy[port])
return;
+ /* No need to configure XMII control register when using SGMII. */
+ if (ksz_is_sgmii_port(dev, port))
+ return;
+
if (phylink_autoneg_inband(mode)) {
dev_err(dev->dev, "In-band AN not supported!\n");
return;
@@ -3999,6 +4108,89 @@ static int ksz_ets_band_to_queue(struct tc_ets_qopt_offload_replace_params *p,
return p->bands - 1 - band;
}
+/**
+ * ksz88x3_tc_ets_add - Configure ETS (Enhanced Transmission Selection)
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to configure
+ * @p: Pointer to offload replace parameters describing ETS bands and mapping
+ *
+ * The KSZ88x3 supports two scheduling modes: Strict Priority and
+ * Weighted Fair Queuing (WFQ). Both modes have fixed behavior:
+ * - No configurable queue-to-priority mapping
+ * - No weight adjustment in WFQ mode
+ *
+ * This function configures the switch to use strict priority mode by
+ * clearing the WFQ enable bit for all queues associated with ETS bands.
+ * If strict priority is not explicitly requested, the switch will default
+ * to WFQ mode.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_add(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int ret, band;
+
+ /* Only strict priority mode is supported for now.
+ * WFQ is implicitly enabled when strict mode is disabled.
+ */
+ for (band = 0; band < p->bands; band++) {
+ int queue = ksz_ets_band_to_queue(p, band);
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+
+ /* Clear WFQ enable bit to select strict priority scheduling */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ksz88x3_tc_ets_del - Reset ETS (Enhanced Transmission Selection) config
+ * for a port on KSZ88x3 switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number to reset
+ *
+ * The KSZ88x3 supports only fixed scheduling modes: Strict Priority or
+ * Weighted Fair Queuing (WFQ), with no reconfiguration of weights or
+ * queue mapping. This function resets the port’s scheduling mode to
+ * the default, which is WFQ, by enabling the WFQ bit for all queues.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_tc_ets_del(struct ksz_device *dev, int port)
+{
+ int ret, queue;
+
+ /* Iterate over all transmit queues for this port */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ u8 reg;
+
+ /* Calculate TXQ Split Control register address for this
+ * port/queue
+ */
+ reg = KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue);
+
+ /* Set WFQ enable bit to revert to the default
+ * scheduling mode
+ */
+ ret = ksz_rmw8(dev, reg, KSZ8873_TXQ_WFQ_ENABLE,
+ KSZ8873_TXQ_WFQ_ENABLE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int ksz_queue_set_strict(struct ksz_device *dev, int port, int queue)
{
int ret;
@@ -4080,6 +4272,7 @@ static int ksz_tc_ets_del(struct ksz_device *dev, int port)
for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
ret = ksz_queue_set_wrr(dev, port, queue,
KSZ9477_DEFAULT_WRR_WEIGHT);
if (ret)
return ret;
}
@@ -4132,7 +4325,7 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
int ret;
- if (is_ksz8(dev))
+ if (is_ksz8(dev) && !ksz_is_ksz88x3(dev))
return -EOPNOTSUPP;
if (qopt->parent != TC_H_ROOT) {
@@ -4146,9 +4339,16 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
if (ret)
return ret;
- return ksz_tc_ets_add(dev, port, &qopt->replace_params);
+ if (ksz_is_ksz88x3(dev))
+ return ksz88x3_tc_ets_add(dev, port,
+ &qopt->replace_params);
+ else
+ return ksz_tc_ets_add(dev, port, &qopt->replace_params);
case TC_ETS_DESTROY:
- return ksz_tc_ets_del(dev, port);
+ if (ksz_is_ksz88x3(dev))
+ return ksz88x3_tc_ets_del(dev, port);
+ else
+ return ksz_tc_ets_del(dev, port);
case TC_ETS_STATS:
case TC_ETS_GRAFT:
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index af17a9c030d4..a08417df2ca4 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
@@ -10,6 +10,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
@@ -93,6 +94,7 @@ struct ksz_chip_data {
bool internal_phy[KSZ_MAX_NUM_PORTS];
bool gbit_capable[KSZ_MAX_NUM_PORTS];
bool ptp_capable;
+ u8 sgmii_port;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
};
@@ -132,6 +134,7 @@ struct ksz_port {
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
+ u32 sgmii_adv_write:1;
struct ksz_port_mib mib;
phy_interface_t interface;
@@ -141,8 +144,9 @@ struct ksz_port {
void *acl_priv;
struct ksz_irq pirq;
u8 num;
+ struct phylink_pcs *pcs;
#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
bool hwts_tx_en;
bool hwts_rx_en;
struct ksz_irq ptpirq;
@@ -440,6 +444,8 @@ struct ksz_dev_ops {
int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
+
+ int (*pcs_create)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
@@ -731,6 +737,21 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4;
}
+static inline int ksz_get_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port - 1;
+}
+
+static inline bool ksz_has_sgmii_port(struct ksz_device *dev)
+{
+ return dev->info->sgmii_port > 0;
+}
+
+static inline bool ksz_is_sgmii_port(struct ksz_device *dev, int port)
+{
+ return dev->info->sgmii_port == port + 1;
+}
+
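sgmii_port is stored one-based so that chip entries which never set it naturally read as "no SGMII port"; the three helpers above translate that convention. A tiny standalone check (the chip names are only labels for the example):

#include <assert.h>
#include <stdbool.h>

struct chip_info { unsigned char sgmii_port; };	/* 0 = no SGMII port */

static bool has_sgmii(const struct chip_info *i)  { return i->sgmii_port > 0; }
static int sgmii_index(const struct chip_info *i) { return i->sgmii_port - 1; }

int main(void)
{
	struct chip_info with_sgmii = { .sgmii_port = 7 };	/* port index 6 */
	struct chip_info without_sgmii = { 0 };

	assert(has_sgmii(&with_sgmii) && sgmii_index(&with_sgmii) == 6);
	assert(!has_sgmii(&without_sgmii));
	return 0;
}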
/* STP State Defines */
#define PORT_TX_ENABLE BIT(2)
#define PORT_RX_ENABLE BIT(1)
@@ -836,6 +857,25 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define SW_HI_SPEED_DRIVE_STRENGTH_S 4
#define SW_LO_SPEED_DRIVE_STRENGTH_S 0
+/* TXQ Split Control Register for per-port, per-queue configuration.
+ * Register 0xAF is TXQ Split for Q3 on Port 1.
+ * Register offset formula: 0xAF + (port * 4) + (3 - queue)
+ * where: port = 0..2, queue = 0..3
+ */
+#define KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue) \
+ (0xAF + ((port) * 4) + (3 - (queue)))
+
+/* Bit 7 selects between:
+ * 0 = Strict priority mode (highest-priority queue first)
+ * 1 = Weighted Fair Queuing (WFQ) mode:
+ * Queue weights: Q3:Q2:Q1:Q0 = 8:4:2:1
+ *     If any queues are empty, their weight is redistributed.
+ *
+ * Note: This is referred to as "Weighted Fair Queuing" (WFQ) in KSZ8863/8873
+ * documentation, and as "Weighted Round Robin" (WRR) in KSZ9477 family docs.
+ */
+#define KSZ8873_TXQ_WFQ_ENABLE BIT(7)
+
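As a sanity check on the offset formula above, a minimal standalone sketch (plain user-space C, not driver code) that enumerates the whole register map:

#include <stdio.h>

#define KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue) \
	(0xAF + ((port) * 4) + (3 - (queue)))

int main(void)
{
	/* port = 0..2, queue = 0..3: prints 0xAF for port 0/queue 3 up to
	 * 0xBA for port 2/queue 0, i.e. twelve consecutive registers.
	 */
	for (int port = 0; port < 3; port++)
		for (int queue = 0; queue < 4; queue++)
			printf("port %d queue %d -> reg 0x%02X\n", port,
			       queue, KSZ8873_TXQ_SPLIT_CTRL_REG(port, queue));
	return 0;
}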
#define KSZ9477_REG_PORT_OUT_RATE_0 0x0420
#define KSZ9477_OUT_RATE_NO_LIMIT 0
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 22fb9ef4645c..8ab664e85f13 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -319,22 +319,21 @@ int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_in
return 0;
}
-int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct ksz_device *dev = ds->priv;
- struct hwtstamp_config *config;
struct ksz_port *prt;
prt = &dev->ports[port];
- config = &prt->tstamp_config;
+ *config = prt->tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
static int ksz_set_hwtstamp_config(struct ksz_device *dev,
struct ksz_port *prt,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
int ret;
@@ -404,26 +403,21 @@ static int ksz_set_hwtstamp_config(struct ksz_device *dev,
return ksz_ptp_enable_mode(dev);
}
-int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ksz_device *dev = ds->priv;
- struct hwtstamp_config config;
struct ksz_port *prt;
int ret;
prt = &dev->ports[port];
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret = ksz_set_hwtstamp_config(dev, prt, &config);
+ ret = ksz_set_hwtstamp_config(dev, prt, config);
if (ret)
return ret;
- memcpy(&prt->tstamp_config, &config, sizeof(config));
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ prt->tstamp_config = *config;
return 0;
}
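The same conversion repeats across several drivers below; since the DSA core now performs the user-space copies, the per-driver handlers reduce to programming the hardware and caching the config. A minimal sketch (hypothetical foo_ driver names and private state, not a real driver):

static int foo_hwtstamp_get(struct dsa_switch *ds, int port,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = ds->priv;	/* hypothetical driver data */

	*config = priv->ports[port].tstamp_config;	/* return cached copy */
	return 0;
}

static int foo_hwtstamp_set(struct dsa_switch *ds, int port,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = ds->priv;
	int err;

	err = foo_program_hwtstamp(priv, port, config);	/* program hardware */
	if (err)
		return err;

	priv->ports[port].tstamp_config = *config;	/* save for _get() */
	return 0;
}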
@@ -1136,8 +1130,8 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
init_completion(&port->tstamp_msg_comp);
- ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
- &ksz_ptp_irq_domain_ops, ptpirq);
+ ptpirq->domain = irq_domain_create_linear(of_fwnode_handle(dev->dev->of_node),
+ ptpirq->nirqs, &ksz_ptp_irq_domain_ops, ptpirq);
if (!ptpirq->domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
index 2f1783c0d723..3086e519b1b6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.h
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -39,8 +39,11 @@ void ksz_ptp_clock_unregister(struct dsa_switch *ds);
int ksz_get_ts_info(struct dsa_switch *ds, int port,
struct kernel_ethtool_ts_info *ts);
-int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
-int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
void ksz_port_deferred_xmit(struct kthread_work *work);
bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index 5f2db4317dd3..842d74268e77 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -11,6 +11,7 @@
#include "mt7530.h"
static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "airoha,an7583-switch", .data = &mt753x_table[ID_AN7583], },
{ .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
{ .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
{ /* sentinel */ },
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index c5d6628d7980..df213c37b4fe 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -32,47 +32,15 @@ static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
- MIB_DESC(1, 0x00, "TxDrop"),
- MIB_DESC(1, 0x04, "TxCrcErr"),
- MIB_DESC(1, 0x08, "TxUnicast"),
- MIB_DESC(1, 0x0c, "TxMulticast"),
- MIB_DESC(1, 0x10, "TxBroadcast"),
- MIB_DESC(1, 0x14, "TxCollision"),
- MIB_DESC(1, 0x18, "TxSingleCollision"),
- MIB_DESC(1, 0x1c, "TxMultipleCollision"),
- MIB_DESC(1, 0x20, "TxDeferred"),
- MIB_DESC(1, 0x24, "TxLateCollision"),
- MIB_DESC(1, 0x28, "TxExcessiveCollistion"),
- MIB_DESC(1, 0x2c, "TxPause"),
- MIB_DESC(1, 0x30, "TxPktSz64"),
- MIB_DESC(1, 0x34, "TxPktSz65To127"),
- MIB_DESC(1, 0x38, "TxPktSz128To255"),
- MIB_DESC(1, 0x3c, "TxPktSz256To511"),
- MIB_DESC(1, 0x40, "TxPktSz512To1023"),
- MIB_DESC(1, 0x44, "Tx1024ToMax"),
- MIB_DESC(2, 0x48, "TxBytes"),
- MIB_DESC(1, 0x60, "RxDrop"),
- MIB_DESC(1, 0x64, "RxFiltering"),
- MIB_DESC(1, 0x68, "RxUnicast"),
- MIB_DESC(1, 0x6c, "RxMulticast"),
- MIB_DESC(1, 0x70, "RxBroadcast"),
- MIB_DESC(1, 0x74, "RxAlignErr"),
- MIB_DESC(1, 0x78, "RxCrcErr"),
- MIB_DESC(1, 0x7c, "RxUnderSizeErr"),
- MIB_DESC(1, 0x80, "RxFragErr"),
- MIB_DESC(1, 0x84, "RxOverSzErr"),
- MIB_DESC(1, 0x88, "RxJabberErr"),
- MIB_DESC(1, 0x8c, "RxPause"),
- MIB_DESC(1, 0x90, "RxPktSz64"),
- MIB_DESC(1, 0x94, "RxPktSz65To127"),
- MIB_DESC(1, 0x98, "RxPktSz128To255"),
- MIB_DESC(1, 0x9c, "RxPktSz256To511"),
- MIB_DESC(1, 0xa0, "RxPktSz512To1023"),
- MIB_DESC(1, 0xa4, "RxPktSz1024ToMax"),
- MIB_DESC(2, 0xa8, "RxBytes"),
- MIB_DESC(1, 0xb0, "RxCtrlDrop"),
- MIB_DESC(1, 0xb4, "RxIngressDrop"),
- MIB_DESC(1, 0xb8, "RxArlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_DROP, "TxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_CRC_ERR, "TxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_TX_COLLISION, "TxCollision"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_DROP, "RxDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_FILTERING, "RxFiltering"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CRC_ERR, "RxCrcErr"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_CTRL_DROP, "RxCtrlDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_INGRESS_DROP, "RxIngressDrop"),
+ MIB_DESC(1, MT7530_PORT_MIB_RX_ARL_DROP, "RxArlDrop"),
};
static void
@@ -790,23 +758,33 @@ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
}
static void
+mt7530_read_port_stats(struct mt7530_priv *priv, int port,
+ u32 offset, u8 size, uint64_t *data)
+{
+ u32 val, reg = MT7530_PORT_MIB_COUNTER(port) + offset;
+
+ val = mt7530_read(priv, reg);
+ *data = val;
+
+ if (size == 2) {
+ val = mt7530_read(priv, reg + 4);
+ *data |= (u64)val << 32;
+ }
+}
+
+static void
mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct mt7530_priv *priv = ds->priv;
const struct mt7530_mib_desc *mib;
- u32 reg, i;
- u64 hi;
+ int i;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) {
mib = &mt7530_mib[i];
- reg = MT7530_PORT_MIB_COUNTER(port) + mib->offset;
- data[i] = mt7530_read(priv, reg);
- if (mib->size == 2) {
- hi = mt7530_read(priv, reg + 4);
- data[i] |= hi << 32;
- }
+ mt7530_read_port_stats(priv, port, mib->offset, mib->size,
+ data + i);
}
}
@@ -819,6 +797,172 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(mt7530_mib);
}
+static void mt7530_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+	/* The MIB counters don't provide a FramesTransmittedOK counter but
+	 * instead provide separate stats for Unicast, Broadcast and Multicast
+	 * frames. To simulate a global frame counter, read Unicast here and
+	 * add Multicast and Broadcast below.
+	 */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &mac_stats->FramesTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_SINGLE_COLLISION, 1,
+ &mac_stats->SingleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTIPLE_COLLISION, 1,
+ &mac_stats->MultipleCollisionFrames);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &mac_stats->FramesReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &mac_stats->OctetsTransmittedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_ALIGN_ERR, 1,
+ &mac_stats->AlignmentErrors);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DEFERRED, 1,
+ &mac_stats->FramesWithDeferredXmissions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_LATE_COLLISION, 1,
+ &mac_stats->LateCollisions);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION, 1,
+ &mac_stats->FramesAbortedDueToXSColls);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &mac_stats->OctetsReceivedOK);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &mac_stats->MulticastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->MulticastFramesXmittedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesXmittedOK);
+ mac_stats->FramesTransmittedOK += mac_stats->BroadcastFramesXmittedOK;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &mac_stats->MulticastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->MulticastFramesReceivedOK;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &mac_stats->BroadcastFramesReceivedOK);
+ mac_stats->FramesReceivedOK += mac_stats->BroadcastFramesReceivedOK;
+}
+
+static const struct ethtool_rmon_hist_range mt7530_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, MT7530_MAX_MTU },
+ {}
+};
+
+static void mt7530_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNDER_SIZE_ERR, 1,
+ &rmon_stats->undersize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_OVER_SZ_ERR, 1,
+ &rmon_stats->oversize_pkts);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_FRAG_ERR, 1,
+ &rmon_stats->fragments);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_JABBER_ERR, 1,
+ &rmon_stats->jabbers);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_64, 1,
+ &rmon_stats->hist[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist[5]);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_64, 1,
+ &rmon_stats->hist_tx[0]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127, 1,
+ &rmon_stats->hist_tx[1]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255, 1,
+ &rmon_stats->hist_tx[2]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511, 1,
+ &rmon_stats->hist_tx[3]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023, 1,
+ &rmon_stats->hist_tx[4]);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX, 1,
+ &rmon_stats->hist_tx[5]);
+
+ *ranges = mt7530_rmon_ranges;
+}
+
+static void mt7530_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mt7530_priv *priv = ds->priv;
+ uint64_t data;
+
+	/* The MIB counters don't provide global packet counters but instead
+	 * provide separate stats for Unicast, Broadcast and Multicast frames.
+	 * To simulate global rx/tx packet counters, read Unicast here and
+	 * add Multicast and Broadcast below.
+	 */
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_UNICAST, 1,
+ &storage->rx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_MULTICAST, 1,
+ &storage->multicast);
+ storage->rx_packets += storage->multicast;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BROADCAST, 1,
+ &data);
+ storage->rx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_UNICAST, 1,
+ &storage->tx_packets);
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_MULTICAST, 1,
+ &data);
+ storage->tx_packets += data;
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BROADCAST, 1,
+ &data);
+ storage->tx_packets += data;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_BYTES, 2,
+ &storage->rx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_BYTES, 2,
+ &storage->tx_bytes);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_DROP, 1,
+ &storage->rx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_DROP, 1,
+ &storage->tx_dropped);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_CRC_ERR, 1,
+ &storage->rx_crc_errors);
+}
+
+static void mt7530_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_TX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesTransmitted);
+
+ mt7530_read_port_stats(priv, port, MT7530_PORT_MIB_RX_PAUSE, 1,
+ &ctrl_stats->MACControlFramesReceived);
+}
+
static int
mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
@@ -1154,7 +1298,7 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
* is affine to the inbound user port.
*/
if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
- priv->id == ID_EN7581)
+ priv->id == ID_EN7581 || priv->id == ID_AN7583)
mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
@@ -2468,7 +2612,7 @@ mt7531_setup_common(struct dsa_switch *ds)
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
/* Enable Special Tag for rx frames */
- if (priv->id == ID_EN7581)
+ if (priv->id == ID_EN7581 || priv->id == ID_AN7583)
mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
@@ -3092,6 +3236,16 @@ static int mt7988_setup(struct dsa_switch *ds)
reset_control_deassert(priv->rstc);
usleep_range(20, 50);
+	/* AN7583 requires an additional tweak to CONN_CFG */
+ if (priv->id == ID_AN7583)
+ mt7530_rmw(priv, AN7583_GEPHY_CONN_CFG,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ AN7583_CSR_ETHER_AFE_PWD,
+ AN7583_CSR_DPHY_CKIN_SEL |
+ AN7583_CSR_PHY_CORE_REG_CLK_SEL |
+ FIELD_PREP(AN7583_CSR_ETHER_AFE_PWD, 0));
+
/* Reset the switch PHYs */
mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
@@ -3105,6 +3259,10 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
+ .get_eth_mac_stats = mt7530_get_eth_mac_stats,
+ .get_rmon_stats = mt7530_get_rmon_stats,
+ .get_eth_ctrl_stats = mt7530_get_eth_ctrl_stats,
+ .get_stats64 = mt7530_get_stats64,
.set_ageing_time = mt7530_set_ageing_time,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
@@ -3196,6 +3354,16 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c45 = mt7531_ind_c45_phy_write,
.mac_port_get_caps = en7581_mac_port_get_caps,
},
+ [ID_AN7583] = {
+ .id = ID_AN7583,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
};
EXPORT_SYMBOL_GPL(mt753x_table);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index c3ea403d7acf..7e47cd9af256 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -20,6 +20,7 @@ enum mt753x_id {
ID_MT7531 = 2,
ID_MT7988 = 3,
ID_EN7581 = 4,
+ ID_AN7583 = 5,
};
#define NUM_TRGMII_CTRL 5
@@ -66,7 +67,8 @@ enum mt753x_id {
#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_CFC : MT753X_MFC)
#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
@@ -76,19 +78,22 @@ enum mt753x_id {
#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_MASK : \
MT7530_MIRROR_PORT_MASK)
#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_GET(val) : \
MT7530_MIRROR_PORT_GET(val))
#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
id == ID_MT7988 || \
- id == ID_EN7581) ? \
+ id == ID_EN7581 || \
+ id == ID_AN7583) ? \
MT7531_MIRROR_PORT_SET(val) : \
MT7530_MIRROR_PORT_SET(val))
@@ -423,6 +428,48 @@ enum mt7530_vlan_port_acc_frm {
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+/* Each define is an offset of MT7530_PORT_MIB_COUNTER */
+#define MT7530_PORT_MIB_TX_DROP 0x00
+#define MT7530_PORT_MIB_TX_CRC_ERR 0x04
+#define MT7530_PORT_MIB_TX_UNICAST 0x08
+#define MT7530_PORT_MIB_TX_MULTICAST 0x0c
+#define MT7530_PORT_MIB_TX_BROADCAST 0x10
+#define MT7530_PORT_MIB_TX_COLLISION 0x14
+#define MT7530_PORT_MIB_TX_SINGLE_COLLISION 0x18
+#define MT7530_PORT_MIB_TX_MULTIPLE_COLLISION 0x1c
+#define MT7530_PORT_MIB_TX_DEFERRED 0x20
+#define MT7530_PORT_MIB_TX_LATE_COLLISION 0x24
+#define MT7530_PORT_MIB_TX_EXCESSIVE_COLLISION 0x28
+#define MT7530_PORT_MIB_TX_PAUSE 0x2c
+#define MT7530_PORT_MIB_TX_PKT_SZ_64 0x30
+#define MT7530_PORT_MIB_TX_PKT_SZ_65_TO_127 0x34
+#define MT7530_PORT_MIB_TX_PKT_SZ_128_TO_255 0x38
+#define MT7530_PORT_MIB_TX_PKT_SZ_256_TO_511 0x3c
+#define MT7530_PORT_MIB_TX_PKT_SZ_512_TO_1023 0x40
+#define MT7530_PORT_MIB_TX_PKT_SZ_1024_TO_MAX 0x44
+#define MT7530_PORT_MIB_TX_BYTES		0x48 /* 64-bit counter */
+#define MT7530_PORT_MIB_RX_DROP 0x60
+#define MT7530_PORT_MIB_RX_FILTERING 0x64
+#define MT7530_PORT_MIB_RX_UNICAST 0x68
+#define MT7530_PORT_MIB_RX_MULTICAST 0x6c
+#define MT7530_PORT_MIB_RX_BROADCAST 0x70
+#define MT7530_PORT_MIB_RX_ALIGN_ERR 0x74
+#define MT7530_PORT_MIB_RX_CRC_ERR 0x78
+#define MT7530_PORT_MIB_RX_UNDER_SIZE_ERR 0x7c
+#define MT7530_PORT_MIB_RX_FRAG_ERR 0x80
+#define MT7530_PORT_MIB_RX_OVER_SZ_ERR 0x84
+#define MT7530_PORT_MIB_RX_JABBER_ERR 0x88
+#define MT7530_PORT_MIB_RX_PAUSE 0x8c
+#define MT7530_PORT_MIB_RX_PKT_SZ_64 0x90
+#define MT7530_PORT_MIB_RX_PKT_SZ_65_TO_127 0x94
+#define MT7530_PORT_MIB_RX_PKT_SZ_128_TO_255 0x98
+#define MT7530_PORT_MIB_RX_PKT_SZ_256_TO_511 0x9c
+#define MT7530_PORT_MIB_RX_PKT_SZ_512_TO_1023 0xa0
+#define MT7530_PORT_MIB_RX_PKT_SZ_1024_TO_MAX 0xa4
+#define MT7530_PORT_MIB_RX_BYTES		0xa8 /* 64-bit counter */
+#define MT7530_PORT_MIB_RX_CTRL_DROP 0xb0
+#define MT7530_PORT_MIB_RX_INGRESS_DROP 0xb4
+#define MT7530_PORT_MIB_RX_ARL_DROP 0xb8
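Absolute counter addresses are simply the per-port base plus one of the offsets above; for example (worked arithmetic, not driver code):

/* RxBytes low word on port 3:
 *   MT7530_PORT_MIB_COUNTER(3) + MT7530_PORT_MIB_RX_BYTES
 *   = 0x4000 + 3 * 0x100 + 0xa8 = 0x43a8,
 * with the high word of the 64-bit counter at 0x43ac.
 */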
#define MT7530_MIB_CCR 0x4fe0
#define CCR_MIB_ENABLE BIT(31)
#define CCR_RX_OCT_CNT_GOOD BIT(7)
@@ -631,6 +678,11 @@ enum mt7531_xtal_fsel {
#define CPORT_SW2FE_STAG_EN BIT(1)
#define CPORT_FE2SW_STAG_EN BIT(0)
+#define AN7583_GEPHY_CONN_CFG 0x7c14
+#define AN7583_CSR_DPHY_CKIN_SEL BIT(31)
+#define AN7583_CSR_PHY_CORE_REG_CLK_SEL BIT(30)
+#define AN7583_CSR_ETHER_AFE_PWD GENMASK(28, 24)
+
/* Registers for LED GPIO control (MT7530 only)
* All registers follow this pattern:
* [ 2: 0] port 0
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 08db846cda8d..2281d6ab8c9a 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -297,7 +297,7 @@ static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
u16 reg, mask;
chip->g1_irq.nirqs = chip->info->g1_irqs;
- chip->g1_irq.domain = irq_domain_add_simple(
+ chip->g1_irq.domain = irq_domain_create_simple(
NULL, chip->g1_irq.nirqs, 0,
&mv88e6xxx_g1_irq_domain_ops, chip);
if (!chip->g1_irq.domain)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 86bf113c9bfa..7d00482f53a3 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -241,7 +241,7 @@ struct mv88e6xxx_port_hwtstamp {
u16 tx_seq_id;
/* Current timestamp configuration */
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
};
enum mv88e6xxx_policy_mapping {
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index b2b5f6ba438f..aaf97c1e3167 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1154,8 +1154,10 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- chip->g2_irq.domain = irq_domain_add_simple(
- chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
+ chip->g2_irq.domain = irq_domain_create_simple(of_fwnode_handle(chip->dev->of_node),
+ 16, 0,
+ &mv88e6xxx_g2_irq_domain_ops,
+ chip);
if (!chip->g2_irq.domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 49e6e1355142..f663799b0b3b 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -89,7 +89,7 @@ int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
@@ -169,42 +169,38 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
}
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config config;
int err;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
+ err = mv88e6xxx_set_hwtstamp_config(chip, port, config);
if (err)
return err;
/* Save the chosen configuration to be returned later. */
- memcpy(&ps->tstamp_config, &config, sizeof(config));
+ ps->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- struct hwtstamp_config *config = &ps->tstamp_config;
if (!chip->info->ptp_support)
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ *config = ps->tstamp_config;
+
+ return 0;
}
/* Returns a pointer to the PTP header if the caller should time stamp,
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index 85acc758e3eb..22e4acc957f0 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -111,9 +111,10 @@
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *cfg);
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
struct sk_buff *clone, unsigned int type);
@@ -132,14 +133,17 @@ int mv88e6165_global_disable(struct mv88e6xxx_chip *chip);
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
-static inline int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds,
- int port, struct ifreq *ifr)
+static inline int
+mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index aed4a4b07f34..1d3b2c94c53e 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -332,13 +332,6 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
int pin;
int err;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -566,6 +559,10 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+ chip->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
if (ptp_ops->set_ptp_cpu_port) {
struct dsa_port *dp;
int upstream = 0;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 0a4e682a55ef..2dd4e56e1cf1 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1774,22 +1774,25 @@ static void felix_teardown(struct dsa_switch *ds)
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config)
{
struct ocelot *ocelot = ds->priv;
- return ocelot_hwstamp_get(ocelot, port, ifr);
+ ocelot_hwstamp_get(ocelot, port, config);
+
+ return 0;
}
static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
bool using_tag_8021q;
int err;
- err = ocelot_hwstamp_set(ocelot, port, ifr);
+ err = ocelot_hwstamp_set(ocelot, port, config, extack);
if (err)
return err;
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index e9f2c67bc15f..79a29676ca6f 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -821,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return ret;
}
- priv->irqdomain = irq_domain_add_linear(np, 1, &ar9331_sw_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(np), 1,
+ &ar9331_sw_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(dev, "failed to create IRQ domain\n");
return -EINVAL;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 7e96355c28bd..964a56ee16cc 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1719,8 +1719,8 @@ static int rtl8365mb_irq_setup(struct realtek_priv *priv)
goto out_put_node;
}
- priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
- &rtl8365mb_irqdomain_ops, priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index f54771cab56d..8bdb52b5fdcb 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -550,10 +550,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- priv->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- priv);
+ priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops, priv);
if (!priv->irqdomain) {
dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 31ea8130a495..df7466d4fe8f 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -337,8 +337,9 @@ static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
bool set)
{
- u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
- A5PSW_MCAST_DEF_MASK};
+ static const u8 offsets[] = {
+ A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK, A5PSW_MCAST_DEF_MASK
+ };
int i;
for (i = 0; i < ARRAY_SIZE(offsets); i++)
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index f8454f3b6f9c..f674c400f05b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2081,6 +2081,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
/* From UM10944 description of DRPDTAG (why put this there?):
* "Management traffic flows to the port regardless of the state
	 * of the INGRESS flag". So BPDUs are still allowed to pass.
@@ -2090,11 +2091,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
- case BR_STATE_LISTENING:
- mac[port].ingress = true;
- mac[port].egress = false;
- mac[port].dyn_learn = false;
- break;
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 198e787e8560..fefe46e2a5e6 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,19 +58,17 @@ enum sja1105_ptp_clk_mode {
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
unsigned long hwts_tx_en, hwts_rx_en;
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
hwts_tx_en = priv->hwts_tx_en;
hwts_rx_en = priv->hwts_rx_en;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
hwts_tx_en &= ~BIT(port);
break;
@@ -81,7 +79,7 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
hwts_rx_en &= ~BIT(port);
break;
@@ -92,32 +90,28 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
return -ERANGE;
}
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
-
priv->hwts_tx_en = hwts_tx_en;
priv->hwts_rx_en = hwts_rx_en;
return 0;
}
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config)
{
struct sja1105_private *priv = ds->priv;
- struct hwtstamp_config config;
- config.flags = 0;
+ config->flags = 0;
if (priv->hwts_tx_en & BIT(port))
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
if (priv->hwts_rx_en & BIT(port))
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
@@ -737,10 +731,6 @@ static int sja1105_per_out_enable(struct sja1105_private *priv,
if (perout->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (perout->flags)
- return -EOPNOTSUPP;
-
mutex_lock(&ptp_data->lock);
rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
@@ -820,13 +810,6 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
if (extts->index != 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (extts->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* We can only enable time stamping on both edges, sadly. */
if ((extts->flags & PTP_STRICT_FLAGS) &&
(extts->flags & PTP_ENABLE_FEATURE) &&
@@ -912,6 +895,9 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.n_pins = 1,
.n_ext_ts = 1,
.n_per_out = 1,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
};
/* Only used on SJA1105 */
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 8add2bd5f728..325e3777ea07 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -112,9 +112,12 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
void sja1105_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
-int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config);
-int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns,
struct ptp_system_timestamp *sts);
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
index 1a4cf6a259f6..ad3ce501e7a5 100644
--- a/drivers/net/ethernet/airoha/Kconfig
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -24,4 +24,11 @@ config NET_AIROHA
This driver supports the gigabit ethernet MACs in the
Airoha SoC family.
+config NET_AIROHA_FLOW_STATS
+ default y
+ bool "Airoha flow stats"
+ depends on NET_AIROHA && NET_AIROHA_NPU
+ help
+	  Enable Airoha flowtable statistics counters.
+
endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index d748dc6de923..d1d3b854361e 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -5,6 +5,7 @@
*/
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
@@ -34,46 +35,40 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
return val;
}
-static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
- u32 clear, u32 set)
+static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
+ int index, u32 clear, u32 set)
{
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ int bank = irq_bank - &qdma->irq_banks[0];
unsigned long flags;
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
return;
- spin_lock_irqsave(&qdma->irq_lock, flags);
+ spin_lock_irqsave(&irq_bank->irq_lock, flags);
- qdma->irqmask[index] &= ~clear;
- qdma->irqmask[index] |= set;
- airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
+ irq_bank->irqmask[index] &= ~clear;
+ irq_bank->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
+ irq_bank->irqmask[index]);
/* Read irq_enable register in order to guarantee the update above
* completes in the spinlock critical section.
*/
- airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
- spin_unlock_irqrestore(&qdma->irq_lock, flags);
+ spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
}
-static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
- u32 mask)
+static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
{
- airoha_qdma_set_irqmask(qdma, index, 0, mask);
+ airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
}
-static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
- u32 mask)
+static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
+ int index, u32 mask)
{
- airoha_qdma_set_irqmask(qdma, index, mask, 0);
-}
-
-static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
-{
- /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
- * GDM{2,3,4} can be used as wan port connected to an external
- * phy module.
- */
- return port->id == 1;
+ airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
}
static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
@@ -527,6 +522,25 @@ static int airoha_fe_init(struct airoha_eth *eth)
/* disable IFC by default */
airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0),
+ FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) |
+ FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1));
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1),
+ FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) |
+ FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2));
+
/* enable 1:N vlan action, init vlan table */
airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
@@ -614,7 +628,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
- dma_addr_t dma_addr = le32_to_cpu(desc->addr);
struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
struct airoha_gdm_port *port;
@@ -623,22 +636,16 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
- if (!dma_addr)
- break;
-
- len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
- if (!len)
- break;
-
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- dma_sync_single_for_cpu(eth->dev, dma_addr,
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
+ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
data_len = q->skb ? q->buf_size
: SKB_WITH_OVERHEAD(q->buf_size);
- if (data_len < len)
+ if (!len || data_len < len)
goto free_frag;
p = airoha_qdma_get_gdm_port(eth, desc);
@@ -694,16 +701,19 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- airoha_ppe_check_skb(eth->ppe, hash);
+ airoha_ppe_check_skb(eth->ppe, q->skb, hash);
done++;
napi_gro_receive(&q->napi, q->skb);
q->skb = NULL;
continue;
free_frag:
- page_pool_put_full_page(q->page_pool, page, true);
- dev_kfree_skb(q->skb);
- q->skb = NULL;
+ if (q->skb) {
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
+ } else {
+ page_pool_put_full_page(q->page_pool, page, true);
+ }
}
airoha_qdma_fill_rx_queue(q);
@@ -720,9 +730,20 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
done += cur;
} while (cur && done < budget);
- if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
- RX_DONE_INT_MASK);
+ if (done < budget && napi_complete(napi)) {
+ struct airoha_qdma *qdma = q->qdma;
+ int i, qid = q - &qdma->q_rx[0];
+ int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
+ : QDMA_INT_REG_IDX2;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
+ BIT(qid % RX_DONE_HIGH_OFFSET));
+ }
+ }
return done;
}
@@ -925,7 +946,7 @@ unlock:
}
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(id));
return done;
@@ -1043,24 +1064,46 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
struct airoha_eth *eth = qdma->eth;
+ int id = qdma - &eth->qdma[0];
dma_addr_t dma_addr;
+ const char *name;
+ int size, index;
u32 status;
- int size;
size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
- qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!qdma->hfwd.desc)
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
return -ENOMEM;
airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
- size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
- qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!qdma->hfwd.q)
+ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
+ if (!name)
return -ENOMEM;
+ index = of_property_match_string(eth->dev->of_node,
+ "memory-region-names", name);
+ if (index >= 0) {
+ struct reserved_mem *rmem;
+ struct device_node *np;
+
+		/* Consume reserved memory for the hw forwarding buffer queue
+		 * if one is provided in the DTS.
+		 */
+ np = of_parse_phandle(eth->dev->of_node, "memory-region",
+ index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+ dma_addr = rmem->base;
+ } else {
+ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL))
+ return -ENOMEM;
+ }
+
airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
@@ -1072,7 +1115,7 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
HW_FWD_DESC_NUM_MASK,
FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
- LMGR_INIT_START);
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
return read_poll_timeout(airoha_qdma_rr, status,
!(status & LMGR_INIT_START), USEC_PER_MSEC,
@@ -1155,14 +1198,24 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
- /* clear pending irqs */
- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ /* clear pending irqs */
airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
-
- /* setup irqs */
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+ /* setup rx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
+ INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
+ INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
+ INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
+ INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
+ }
+ /* setup tx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
+ TX_COHERENT_HIGH_INT_MASK);
/* setup irq binding */
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
@@ -1207,30 +1260,39 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
- struct airoha_qdma *qdma = dev_instance;
- u32 intr[ARRAY_SIZE(qdma->irqmask)];
+ struct airoha_irq_bank *irq_bank = dev_instance;
+ struct airoha_qdma *qdma = irq_bank->qdma;
+ u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
+ u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
int i;
- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+ for (i = 0; i < ARRAY_SIZE(intr); i++) {
intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
- intr[i] &= qdma->irqmask[i];
+ intr[i] &= irq_bank->irqmask[i];
airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
- if (intr[1] & RX_DONE_INT_MASK) {
- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
- RX_DONE_INT_MASK);
+ rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
+ if (rx_intr1) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
+ rx_intr_mask |= rx_intr1;
+ }
- for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
- if (!qdma->q_rx[i].ndesc)
- continue;
+ rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
+ if (rx_intr2) {
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
+ rx_intr_mask |= (rx_intr2 << 16);
+ }
- if (intr[1] & BIT(i))
- napi_schedule(&qdma->q_rx[i].napi);
- }
+ for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ if (rx_intr_mask & BIT(i))
+ napi_schedule(&qdma->q_rx[i].napi);
}
if (intr[0] & INT_TX_MASK) {
@@ -1238,7 +1300,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
napi_schedule(&qdma->q_tx_irq[i].napi);
}
@@ -1247,6 +1309,39 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
+ struct airoha_qdma *qdma)
+{
+ struct airoha_eth *eth = qdma->eth;
+ int i, id = qdma - &eth->qdma[0];
+
+ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
+ struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
+ int err, irq_index = 4 * id + i;
+ const char *name;
+
+ spin_lock_init(&irq_bank->irq_lock);
+ irq_bank->qdma = qdma;
+
+ irq_bank->irq = platform_get_irq(pdev, irq_index);
+ if (irq_bank->irq < 0)
+ return irq_bank->irq;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL,
+ KBUILD_MODNAME ".%d", irq_index);
+ if (!name)
+ return -ENOMEM;
+
+ err = devm_request_irq(eth->dev, irq_bank->irq,
+ airoha_irq_handler, IRQF_SHARED, name,
+ irq_bank);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int airoha_qdma_init(struct platform_device *pdev,
struct airoha_eth *eth,
struct airoha_qdma *qdma)
@@ -1254,9 +1349,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
int err, id = qdma - &eth->qdma[0];
const char *res;
- spin_lock_init(&qdma->irq_lock);
qdma->eth = eth;
-
res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
if (!res)
return -ENOMEM;
@@ -1266,12 +1359,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
"failed to iomap qdma%d regs\n", id);
- qdma->irq = platform_get_irq(pdev, 4 * id);
- if (qdma->irq < 0)
- return qdma->irq;
-
- err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, qdma);
+ err = airoha_qdma_init_irq_banks(pdev, qdma);
if (err)
return err;
@@ -1631,7 +1719,6 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
if (port->id == 3) {
/* FIXME: handle XSI_PCE1_PORT */
- airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500);
airoha_fe_rmw(eth, REG_FE_WAN_PORT,
WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
@@ -2109,6 +2196,125 @@ static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
}
}
+static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
+ REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, enum trtcm_param_type param,
+ u32 val)
+{
+ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
+ u32 config = RATE_LIMIT_PARAM_RW_MASK |
+ FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
+ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
+ u32 addr, bool enable, u32 enable_mask)
+{
+ u32 val;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ &val, NULL);
+ if (err)
+ return err;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
+ val);
+}
+
+static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
+ int queue_id, u32 rate_val,
+ u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_MISC_MODE, &config, NULL);
+ if (err)
+ return err;
+
+ val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_TOKEN_RATE_MODE, rate);
+ if (err)
+ return err;
+
+ val = bucket_size;
+ if (!(config & TRTCM_PKT_MODE))
+ val = max_t(u32, val, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ TRTCM_BUCKETSIZE_SHIFT_MODE, val);
+}
+
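The fixed-point split in airoha_qdma_set_rl_token_bucket() is easier to follow with numbers (illustrative values, not taken from real hardware):

/* Byte mode, INGRESS_FAST_TICK = 4, TRTCM_TICK_SEL clear:
 *   unit = 8000 / 4 = 2000
 * A 25000 Kbit/s request then splits into
 *   rate      = 25000 / 2000 = 12 whole tokens per tick, plus
 *   rate_frac = 1000 / 2000  = 0.5 token, scaled into
 *               TRTCM_TOKEN_RATE_FRACTION_MASK
 */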
+static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
+ bool enable, enum trtcm_unit_type unit)
+{
+ bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
+ enum trtcm_param mode = TRTCM_METER_MODE;
+ int err;
+
+ mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
+ err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ enable, mode);
+ if (err)
+ return err;
+
+ return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
+ tick_sel, TRTCM_TICK_SEL);
+}
+
static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
u32 addr, enum trtcm_param_type param,
enum trtcm_mode_type mode,
@@ -2273,10 +2479,142 @@ static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
return 0;
}
+static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
+ u32 rate, u32 bucket_size,
+ enum trtcm_unit_type unit_type)
+{
+ struct airoha_qdma *qdma = port->qdma;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
+ bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
+{
+ const struct flow_action *actions = &f->rule->action;
+ const struct flow_action_entry *act;
+
+ if (!flow_action_has_entries(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "filter run with no actions");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(actions)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+				   "only one action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &actions->entries[0];
+ if (act->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid exceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "invalid notexceed action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(actions, act)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "action accept must be last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps || act->police.avrate ||
+ act->police.overhead || act->police.mtu) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "peakrate/avrate/overhead/mtu unsupported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *f)
+{
+ enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 rate = 0, bucket_size = 0;
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE: {
+ const struct flow_action_entry *act;
+ int err;
+
+ err = airoha_tc_matchall_act_validate(f);
+ if (err)
+ return err;
+
+ act = &f->rule->action.entries[0];
+ if (act->police.rate_pkt_ps) {
+ rate = act->police.rate_pkt_ps;
+ bucket_size = act->police.burst_pkt;
+ unit_type = TRTCM_PACKET_UNIT;
+ } else {
+ rate = div_u64(act->police.rate_bytes_ps, 1000);
+ rate = rate << 3; /* Kbps */
+ bucket_size = act->police.burst;
+ }
+ fallthrough;
+ }
+ case TC_CLSMATCHALL_DESTROY:
+ return airoha_qdma_set_rx_meter(port, rate, bucket_size,
+ unit_type);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
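For the byte-based path, the conversion above turns the policer rate into Kbit/s; a worked example:

/* "tc ... action police rate 100mbit" arrives as
 * rate_bytes_ps = 12500000:
 *   div_u64(12500000, 1000) = 12500, and 12500 << 3 = 100000,
 * i.e. 100000 Kbit/s is handed to airoha_qdma_set_rx_meter().
 */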
+static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct net_device *dev = cb_priv;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return airoha_ppe_setup_tc_block_cb(dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return airoha_dev_tc_matchall(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
struct flow_block_offload *f)
{
- flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
+ flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
static LIST_HEAD(block_cb_list);
struct flow_block_cb *block_cb;
@@ -2515,7 +2853,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
dev->features |= dev->hw_features;
dev->vlan_features = dev->hw_features;
dev->dev.of_node = np;
- dev->irq = qdma->irq;
+ dev->irq = qdma->irq_banks[0].irq;
SET_NETDEV_DEV(dev, eth->dev);
/* reserve hw queues for HTB offloading */
@@ -2545,7 +2883,15 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
if (err)
return err;
- return register_netdev(dev);
+ err = register_netdev(dev);
+ if (err)
+ goto free_metadata_dst;
+
+ return 0;
+
+free_metadata_dst:
+ airoha_metadata_dst_free(port);
+ return err;
}
static int airoha_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index ec8908f904c6..b815697302bf 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -17,6 +17,7 @@
#define AIROHA_MAX_NUM_GDM_PORTS 4
#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_NUM_IRQ_BANKS 4
#define AIROHA_MAX_DSA_PORTS 7
#define AIROHA_MAX_NUM_RSTS 3
#define AIROHA_MAX_NUM_XSI_RSTS 5
@@ -49,6 +50,14 @@
#define PPE_NUM 2
#define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
+#ifdef CONFIG_NET_AIROHA_FLOW_STATS
+#define PPE1_STATS_NUM_ENTRIES (4 * 1024)
+#else
+#define PPE1_STATS_NUM_ENTRIES 0
+#endif /* CONFIG_NET_AIROHA_FLOW_STATS */
+#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES)
+#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES)
+#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES)
#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
@@ -127,6 +136,11 @@ enum tx_sched_mode {
TC_SCH_WRR2,
};
+enum trtcm_unit_type {
+ TRTCM_BYTE_UNIT,
+ TRTCM_PACKET_UNIT,
+};
+
enum trtcm_param_type {
TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
TRTCM_TOKEN_RATE_MODE,
@@ -255,6 +269,8 @@ struct airoha_foe_mac_info {
u16 pppoe_id;
u16 src_mac_lo;
+
+ u32 meter;
};
#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
@@ -290,6 +306,11 @@ struct airoha_foe_mac_info {
#define AIROHA_FOE_TUNNEL BIT(6)
#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16)
+#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9)
+#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5)
+#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0)
+
struct airoha_foe_bridge {
u32 dest_mac_hi;
@@ -373,6 +394,8 @@ struct airoha_foe_ipv6 {
u32 ib2;
struct airoha_foe_mac_info_common l2;
+
+ u32 meter;
};
struct airoha_foe_entry {
@@ -391,6 +414,16 @@ struct airoha_foe_entry {
};
};
+struct airoha_foe_stats {
+ u32 bytes;
+ u32 packets;
+};
+
+struct airoha_foe_stats64 {
+ u64 bytes;
+ u64 packets;
+};
+
struct airoha_flow_data {
struct ethhdr eth;
@@ -422,37 +455,64 @@ struct airoha_flow_data {
} pppoe;
};
+enum airoha_flow_entry_type {
+ FLOW_TYPE_L4,
+ FLOW_TYPE_L2,
+ FLOW_TYPE_L2_SUBFLOW,
+};
+
struct airoha_flow_table_entry {
- struct hlist_node list;
+ union {
+ struct hlist_node list; /* PPE L3 flow entry */
+ struct {
+ struct rhash_head l2_node; /* L2 flow entry */
+ struct hlist_head l2_flows; /* PPE L2 subflows list */
+ };
+ };
struct airoha_foe_entry data;
+ struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
u32 hash;
+ struct airoha_foe_stats64 stats;
+ enum airoha_flow_entry_type type;
+
struct rhash_head node;
unsigned long cookie;
};
-struct airoha_qdma {
- struct airoha_eth *eth;
- void __iomem *regs;
+/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
+#define RX_IRQ0_BANK_PIN_MASK 0x839f
+#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
+#define RX_IRQ2_BANK_PIN_MASK 0x20
+#define RX_IRQ3_BANK_PIN_MASK 0x40
+#define RX_IRQ_BANK_PIN_MASK(_n) \
+ (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
+ ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
+ ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
+ RX_IRQ0_BANK_PIN_MASK)
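+
+/* e.g. rx queue 5 (BIT(5) == 0x20) is routed to irq bank 2 and queue
+ * 6 (BIT(6) == 0x40) to bank 3; the remaining queues are split across
+ * banks 0 and 1 as encoded above.
+ */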
+
+struct airoha_irq_bank {
+ struct airoha_qdma *qdma;
/* protect concurrent irqmask accesses */
spinlock_t irq_lock;
u32 irqmask[QDMA_INT_REG_MAX];
int irq;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
atomic_t users;
+ struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
+
struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
};
struct airoha_gdm_port {
@@ -480,9 +540,14 @@ struct airoha_ppe {
void *foe;
dma_addr_t foe_dma;
+ struct rhashtable l2_flows;
+
struct hlist_head *foe_flow;
u16 foe_check_time[PPE_NUM_ENTRIES];
+ struct airoha_foe_stats *foe_stats;
+ dma_addr_t foe_stats_dma;
+
struct dentry *debugfs_dir;
};
@@ -532,16 +597,27 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
#define airoha_qdma_clear(qdma, offset, val) \
airoha_rmw((qdma)->regs, (offset), (val), 0)
+static inline bool airoha_is_lan_gdm_port(struct airoha_gdm_port *port)
+{
+ /* The GDM1 port on the EN7581 SoC is connected to the LAN DSA
+ * switch. GDM{2,3,4} can be used as WAN ports connected to an
+ * external PHY module.
+ */
+ return port->id == 1;
+}
+
bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
struct airoha_gdm_port *port);
-void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
-int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+ u16 hash);
+int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
int airoha_ppe_init(struct airoha_eth *eth);
void airoha_ppe_deinit(struct airoha_eth *eth);
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
u32 hash);
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats);
#ifdef CONFIG_DEBUG_FS
int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index ead0625e781f..0e5b8c21b9aa 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -12,6 +12,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/regmap.h>
+#include "airoha_eth.h"
#include "airoha_npu.h"
#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
@@ -72,6 +73,7 @@ enum {
PPE_FUNC_SET_WAIT_HWNAT_INIT,
PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
PPE_FUNC_SET_WAIT_API,
+ PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP,
};
enum {
@@ -115,6 +117,10 @@ struct ppe_mbox_data {
u32 size;
u32 data;
} set_info;
+ struct {
+ u32 npu_stats_addr;
+ u32 foe_stats_addr;
+ } stats_info;
};
};
@@ -124,17 +130,12 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
u16 core = 0; /* FIXME */
u32 val, offset = core << 4;
dma_addr_t dma_addr;
- void *addr;
int ret;
- addr = kmemdup(p, size, GFP_ATOMIC);
- if (!addr)
- return -ENOMEM;
-
- dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
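+	/* Callers now pass heap-allocated buffers that can be mapped
+	 * directly, so the kmemdup() bounce copy is no longer needed.
+	 */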
+ dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
ret = dma_mapping_error(npu->dev, dma_addr);
if (ret)
- goto out;
+ return ret;
spin_lock_bh(&npu->cores[core].lock);
@@ -155,8 +156,6 @@ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
spin_unlock_bh(&npu->cores[core].lock);
dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
-out:
- kfree(addr);
return ret;
}
@@ -261,79 +260,137 @@ static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
static int airoha_npu_ppe_init(struct airoha_npu *npu)
{
- struct ppe_mbox_data ppe_data = {
- .func_type = NPU_OP_SET,
- .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
- .init_info = {
- .ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
- .wan_mode = QDMA_WAN_ETHER,
- },
- };
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
+ ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
+ ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
}
static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
{
- struct ppe_mbox_data ppe_data = {
- .func_type = NPU_OP_SET,
- .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
- };
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
+
+ return err;
}
static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
dma_addr_t foe_addr,
int sram_num_entries)
{
- struct ppe_mbox_data ppe_data = {
- .func_type = NPU_OP_SET,
- .func_id = PPE_FUNC_SET_WAIT_API,
- .set_info = {
- .func_id = PPE_SRAM_RESET_VAL,
- .data = foe_addr,
- .size = sram_num_entries,
- },
- };
+ struct ppe_mbox_data *ppe_data;
+ int err;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = sram_num_entries;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ kfree(ppe_data);
- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ return err;
}
static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
dma_addr_t foe_addr,
u32 entry_size, u32 hash, bool ppe2)
{
- struct ppe_mbox_data ppe_data = {
- .func_type = NPU_OP_SET,
- .func_id = PPE_FUNC_SET_WAIT_API,
- .set_info = {
- .data = foe_addr,
- .size = entry_size,
- },
- };
+ struct ppe_mbox_data *ppe_data;
int err;
- ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
- : PPE_SRAM_SET_ENTRY;
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+ ppe_data->set_info.data = foe_addr;
+ ppe_data->set_info.size = entry_size;
+ ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
- err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
if (err)
- return err;
+ goto out;
+
+ ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data->set_info.data = hash;
+ ppe_data->set_info.size = sizeof(u32);
- ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
- ppe_data.set_info.data = hash;
- ppe_data.set_info.size = sizeof(u32);
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+out:
+ kfree(ppe_data);
- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
- sizeof(struct ppe_mbox_data));
+ return err;
}
-struct airoha_npu *airoha_npu_get(struct device *dev)
+static int airoha_npu_stats_setup(struct airoha_npu *npu,
+ dma_addr_t foe_stats_addr)
+{
+ int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats);
+ struct ppe_mbox_data *ppe_data;
+
+ if (!size) /* flow stats are disabled */
+ return 0;
+
+ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+ if (!ppe_data)
+ return -ENOMEM;
+
+ ppe_data->func_type = NPU_OP_SET;
+ ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP;
+ ppe_data->stats_info.foe_stats_addr = foe_stats_addr;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+ sizeof(*ppe_data));
+ if (err)
+ goto out;
+
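+ /* The reply presumably carries the NPU-side address of the stats
+ * buffer; map it so the host can read the counters.
+ */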
+ npu->stats = devm_ioremap(npu->dev,
+ ppe_data->stats_info.npu_stats_addr,
+ size);
+ if (!npu->stats)
+ err = -ENOMEM;
+out:
+ kfree(ppe_data);
+
+ return err;
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
{
struct platform_device *pdev;
struct device_node *np;
@@ -371,6 +428,17 @@ struct airoha_npu *airoha_npu_get(struct device *dev)
goto error_module_put;
}
+ if (stats_addr) {
+ int err;
+
+ err = airoha_npu_stats_setup(npu, *stats_addr);
+ if (err) {
+ dev_err(dev, "failed to set up npu stats buffer\n");
+ npu = ERR_PTR(err);
+ goto error_module_put;
+ }
+ }
+
return npu;
error_module_put:
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
index a2b8ae4d9473..98ec3be74ce4 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.h
+++ b/drivers/net/ethernet/airoha/airoha_npu.h
@@ -17,6 +17,8 @@ struct airoha_npu {
struct work_struct wdt_work;
} cores[NPU_NUM_CORES];
+ struct airoha_foe_stats __iomem *stats;
+
struct {
int (*ppe_init)(struct airoha_npu *npu);
int (*ppe_deinit)(struct airoha_npu *npu);
@@ -30,5 +32,5 @@ struct airoha_npu {
} ops;
};
-struct airoha_npu *airoha_npu_get(struct device *dev);
+struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
void airoha_npu_put(struct airoha_npu *npu);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index f10dab935cab..12d32c92717a 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -24,6 +24,13 @@ static const struct rhashtable_params airoha_flow_table_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params airoha_l2_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
+ .key_len = 2 * ETH_ALEN,
+ .automatic_shrinking = true,
+};
+
static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
{
return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
@@ -77,6 +84,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_TB_CFG_KEEPALIVE_MASK |
PPE_TB_ENTRY_SIZE_MASK,
FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
@@ -95,7 +103,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
if (airoha_ppe2_is_enabled(eth)) {
sram_num_entries =
- PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
PPE_SRAM_TB_NUM_ENTRY_MASK |
PPE_DRAM_TB_NUM_ENTRY_MASK,
@@ -112,7 +120,7 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
dram_num_entries));
} else {
sram_num_entries =
- PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
PPE_SRAM_TB_NUM_ENTRY_MASK |
PPE_DRAM_TB_NUM_ENTRY_MASK,
@@ -197,6 +205,15 @@ static int airoha_get_dsa_port(struct net_device **dev)
#endif
}
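+
+/* FOE bridge entries pack the MACs into 32/16-bit words: the
+ * destination address is split 4+2 bytes (hi/lo) and the source 2+4.
+ */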
+static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
+ struct ethhdr *eh)
+{
+ br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
+ br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
+ br->src_mac_hi = get_unaligned_be16(eh->h_source);
+ br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
+}
+
static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
struct airoha_foe_entry *hwe,
struct net_device *dev, int type,
@@ -234,6 +251,12 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
else
pse_port = 2; /* uplink relies on GDM2 loopback */
val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
+
+ /* For downlink traffic, consume SRAM memory for the hw forwarding
+ * descriptor queue.
+ */
+ if (airoha_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
}
if (is_multicast_ether_addr(data->eth.h_dest))
@@ -247,13 +270,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
if (type == PPE_PKT_TYPE_BRIDGE) {
- hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
- hwe->bridge.dest_mac_lo =
- get_unaligned_be16(data->eth.h_dest + 4);
- hwe->bridge.src_mac_hi =
- get_unaligned_be16(data->eth.h_source);
- hwe->bridge.src_mac_lo =
- get_unaligned_be32(data->eth.h_source + 2);
+ airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
hwe->bridge.data = qdata;
hwe->bridge.ib2 = val;
l2 = &hwe->bridge.l2.common;
@@ -378,6 +395,19 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
hv3 ^= hwe->ipv6.src_ip[0];
break;
+ case PPE_PKT_TYPE_BRIDGE: {
+ struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
+
+ hv1 = l2->common.src_mac_hi & 0xffff;
+ hv1 = hv1 << 16 | l2->src_mac_lo;
+
+ hv2 = l2->common.dest_mac_lo;
+ hv2 = hv2 << 16;
+ hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
+
+ hv3 = l2->common.dest_mac_hi;
+ break;
+ }
case PPE_PKT_TYPE_IPV4_DSLITE:
case PPE_PKT_TYPE_IPV6_6RD:
default:
@@ -394,6 +424,77 @@ static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
return hash;
}
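+
+/* Stats slots are allocated per PPE: with PPE2 enabled, hashes beyond
+ * the shared stats range are remapped down by the per-PPE entry
+ * count.
+ */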
+static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
+{
+ if (!airoha_ppe2_is_enabled(ppe->eth))
+ return hash;
+
+ return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
+ : hash;
+}
+
+static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ int index)
+{
+ memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
+ memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
+}
+
+static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i;
+
+ for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
+}
+
+static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
+ struct airoha_npu *npu,
+ struct airoha_foe_entry *hwe,
+ u32 hash)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 index, pse_port, val, *data, *ib2, *meter;
+ u8 nbq;
+
+ index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
+ if (index >= PPE_STATS_NUM_ENTRIES)
+ return;
+
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ data = &hwe->bridge.data;
+ ib2 = &hwe->bridge.ib2;
+ meter = &hwe->bridge.l2.meter;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = &hwe->ipv6.data;
+ ib2 = &hwe->ipv6.ib2;
+ meter = &hwe->ipv6.meter;
+ } else {
+ data = &hwe->ipv4.data;
+ ib2 = &hwe->ipv4.ib2;
+ meter = &hwe->ipv4.l2.meter;
+ }
+
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
+
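+ /* Stash the original channel/qid (in the ACTDP field) and the ib2
+ * forwarding bits (in the meter word), then redirect the flow to
+ * PSE port 6, presumably so the NPU can account packets and restore
+ * the original forwarding state.
+ */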
+ val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
+ *data = (*data & ~AIROHA_FOE_ACTDP) |
+ FIELD_PREP(AIROHA_FOE_ACTDP, val);
+
+ val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
+ *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
+
+ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ nbq = pse_port == 1 ? 6 : 5;
+ *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS);
+ *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
+ FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
+}
+
struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
u32 hash)
{
@@ -447,6 +548,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
u32 ts = airoha_ppe_get_timestamp(ppe);
struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+ int err = 0;
memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
wmb();
@@ -455,31 +558,127 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
hwe->ib1 = e->ib1;
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (!npu) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+
if (hash < PPE_SRAM_NUM_ENTRIES) {
dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
bool ppe2 = airoha_ppe2_is_enabled(eth) &&
hash >= PPE1_SRAM_NUM_ENTRIES;
- struct airoha_npu *npu;
- int err = -ENODEV;
- rcu_read_lock();
- npu = rcu_dereference(eth->npu);
- if (npu)
- err = npu->ops.ppe_foe_commit_entry(npu, addr,
- sizeof(*hwe), hash,
- ppe2);
- rcu_read_unlock();
+ err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
+ hash, ppe2);
+ }
+unlock:
+ rcu_read_unlock();
+
+ return err;
+}
- return err;
+static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ e->hash = 0xffff;
+ }
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ hlist_del_init(&e->l2_subflow_node);
+ kfree(e);
}
+}
+
+static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct hlist_head *head = &e->l2_flows;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
+ airoha_ppe_foe_remove_flow(ppe, e);
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ if (e->type == FLOW_TYPE_L2)
+ airoha_ppe_foe_remove_l2_flow(ppe, e);
+ else
+ airoha_ppe_foe_remove_flow(ppe, e);
+
+ spin_unlock_bh(&ppe_lock);
+}
+
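+/* L2 flows match on MAC addresses only; each hw hash that hits the
+ * bridge flow gets its own subflow entry, cloned from the current hw
+ * state and linked to the parent's l2_flows list.
+ */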
+static int
+airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e,
+ u32 hash)
+{
+ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
+ struct airoha_foe_entry *hwe_p, hwe;
+ struct airoha_flow_table_entry *f;
+ struct airoha_foe_mac_info *l2;
+ int type;
+
+ hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
+ if (!hwe_p)
+ return -EINVAL;
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return -ENOMEM;
+
+ hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
+ f->type = FLOW_TYPE_L2_SUBFLOW;
+ f->hash = hash;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
+ l2 = &hwe.bridge.l2;
+ memcpy(l2, &e->data.bridge.l2, sizeof(*l2));
+
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
+ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+ sizeof(hwe.ipv4.new_tuple));
+ else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T &&
+ l2->common.etype == ETH_P_IP)
+ l2->common.etype = ETH_P_IPV6;
+
+ hwe.bridge.ib2 = e->data.bridge.ib2;
+ hwe.bridge.data = e->data.bridge.data;
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
return 0;
}
-static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
+ struct sk_buff *skb,
+ u32 hash)
{
struct airoha_flow_table_entry *e;
+ struct airoha_foe_bridge br = {};
struct airoha_foe_entry *hwe;
+ bool commit_done = false;
struct hlist_node *n;
u32 index, state;
@@ -495,21 +694,68 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
index = airoha_ppe_foe_get_entry_hash(hwe);
hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
- if (airoha_ppe_foe_compare_entry(e, hwe)) {
- airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
- e->hash = hash;
- break;
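+ /* Stale subflow entries for this hash are reclaimed below, and at
+ * most one matching flow is committed per check event.
+ */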
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ e->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, e);
+ }
+ continue;
+ }
+
+ if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
+ e->hash = 0xffff;
+ continue;
}
+
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ commit_done = true;
+ e->hash = hash;
}
+
+ if (commit_done)
+ goto unlock;
+
+ airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
+ e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
+ airoha_l2_flow_table_params);
+ if (e)
+ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
unlock:
spin_unlock_bh(&ppe_lock);
}
+static int
+airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ struct airoha_flow_table_entry *prev;
+
+ e->type = FLOW_TYPE_L2;
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
+ if (!prev)
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &e->l2_node,
+ airoha_l2_flow_table_params);
+}
+
static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *e)
{
- u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ u32 hash;
+
+ if (type == PPE_PKT_TYPE_BRIDGE)
+ return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
+ hash = airoha_ppe_foe_get_entry_hash(&e->data);
+ e->type = FLOW_TYPE_L4;
e->hash = 0xffff;
spin_lock_bh(&ppe_lock);
@@ -519,23 +765,98 @@ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
return 0;
}
-static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
+{
+ u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
+ int idle;
+
+ if (state == AIROHA_FOE_STATE_BIND) {
+ ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
+ ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ } else {
+ ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
+ now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
+ ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
+ }
+ idle = now - ts;
+
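+ /* The hw timestamp counter wraps; renormalize negative deltas by
+ * one full counter period.
+ */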
+ return idle < 0 ? idle + ts_mask + 1 : idle;
+}
+
+static void
+airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+ struct airoha_flow_table_entry *iter;
+ struct hlist_node *n;
+
+ lockdep_assert_held(&ppe_lock);
+
+ hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
+ struct airoha_foe_entry *hwe;
+ u32 ib1, state;
+ int idle;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
+ ib1 = READ_ONCE(hwe->ib1);
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ iter->hash = 0xffff;
+ airoha_ppe_foe_remove_flow(ppe, iter);
+ continue;
+ }
+
+ idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
+ if (idle >= min_idle)
+ continue;
+
+ min_idle = idle;
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *e)
{
+ struct airoha_foe_entry *hwe_p, hwe = {};
+
spin_lock_bh(&ppe_lock);
- hlist_del_init(&e->list);
- if (e->hash != 0xffff) {
- e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
- e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
- AIROHA_FOE_STATE_INVALID);
- airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ if (e->type == FLOW_TYPE_L2) {
+ airoha_ppe_foe_flow_l2_entry_update(ppe, e);
+ goto unlock;
+ }
+
+ if (e->hash == 0xffff)
+ goto unlock;
+
+ hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
+ if (!hwe_p)
+ goto unlock;
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
e->hash = 0xffff;
+ goto unlock;
}
+ e->data.ib1 = hwe.ib1;
+unlock:
spin_unlock_bh(&ppe_lock);
}
+static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ airoha_ppe_foe_flow_entry_update(ppe, e);
+
+ return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+}
+
static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
struct flow_cls_offload *f)
{
@@ -751,6 +1072,60 @@ static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
return 0;
}
+void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+ struct airoha_foe_stats64 *stats)
+{
+ u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
+ struct airoha_eth *eth = ppe->eth;
+ struct airoha_npu *npu;
+
+ if (index >= PPE_STATS_NUM_ENTRIES)
+ return;
+
+ rcu_read_lock();
+
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ u64 packets = ppe->foe_stats[index].packets;
+ u64 bytes = ppe->foe_stats[index].bytes;
+ struct airoha_foe_stats npu_stats;
+
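+ /* 64-bit counters are split: the NPU exposes the low 32 bits,
+ * while ppe->foe_stats accumulates the high 32 bits.
+ */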
+ memcpy_fromio(&npu_stats, &npu->stats[index],
+ sizeof(*npu->stats));
+ stats->packets = packets << 32 | npu_stats.packets;
+ stats->bytes = bytes << 32 | npu_stats.bytes;
+ }
+
+ rcu_read_unlock();
+}
+
+static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ u32 idle;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ idle = airoha_ppe_entry_idle_time(eth->ppe, e);
+ f->stats.lastused = jiffies - idle * HZ;
+
+ if (e->hash != 0xffff) {
+ struct airoha_foe_stats64 stats = {};
+
+ airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
+ f->stats.pkts += (stats.packets - e->stats.packets);
+ f->stats.bytes += (stats.bytes - e->stats.bytes);
+ e->stats = stats;
+ }
+
+ return 0;
+}
+
static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
struct flow_cls_offload *f)
{
@@ -759,6 +1134,8 @@ static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
return airoha_ppe_flow_offload_replace(port, f);
case FLOW_CLS_DESTROY:
return airoha_ppe_flow_offload_destroy(port, f);
+ case FLOW_CLS_STATS:
+ return airoha_ppe_flow_offload_stats(port, f);
default:
break;
}
@@ -784,11 +1161,12 @@ static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
- struct airoha_npu *npu = airoha_npu_get(eth->dev);
+ struct airoha_npu *npu = airoha_npu_get(eth->dev,
+ &eth->ppe->foe_stats_dma);
if (IS_ERR(npu)) {
request_module("airoha-npu");
- npu = airoha_npu_get(eth->dev);
+ npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
}
return npu;
@@ -811,6 +1189,8 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
if (err)
goto error_npu_put;
+ airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
+
rcu_assign_pointer(eth->npu, npu);
synchronize_rcu();
@@ -822,18 +1202,13 @@ error_npu_put:
return err;
}
-int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
+int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
{
- struct flow_cls_offload *cls = type_data;
- struct net_device *dev = cb_priv;
struct airoha_gdm_port *port = netdev_priv(dev);
+ struct flow_cls_offload *cls = type_data;
struct airoha_eth *eth = port->qdma->eth;
int err = 0;
- if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
- return -EOPNOTSUPP;
-
mutex_lock(&flow_offload_mutex);
if (!eth->npu)
@@ -846,7 +1221,8 @@ int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return err;
}
-void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+ u16 hash)
{
u16 now, diff;
@@ -859,7 +1235,7 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
return;
ppe->foe_check_time[hash] = now;
- airoha_ppe_foe_insert_entry(ppe, hash);
+ airoha_ppe_foe_insert_entry(ppe, skb, hash);
}
int airoha_ppe_init(struct airoha_eth *eth)
@@ -886,13 +1262,33 @@ int airoha_ppe_init(struct airoha_eth *eth)
if (!ppe->foe_flow)
return -ENOMEM;
+ foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
+ if (foe_size) {
+ ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
+ &ppe->foe_stats_dma,
+ GFP_KERNEL);
+ if (!ppe->foe_stats)
+ return -ENOMEM;
+ }
+
err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
if (err)
return err;
+ err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
+ if (err)
+ goto error_flow_table_destroy;
+
err = airoha_ppe_debugfs_init(ppe);
if (err)
- rhashtable_destroy(&eth->flow_table);
+ goto error_l2_flow_table_destroy;
+
+ return 0;
+
+error_l2_flow_table_destroy:
+ rhashtable_destroy(&ppe->l2_flows);
+error_flow_table_destroy:
+ rhashtable_destroy(&eth->flow_table);
return err;
}
@@ -909,6 +1305,7 @@ void airoha_ppe_deinit(struct airoha_eth *eth)
}
rcu_read_unlock();
+ rhashtable_destroy(&eth->ppe->l2_flows);
rhashtable_destroy(&eth->flow_table);
debugfs_remove(eth->ppe->debugfs_dir);
}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
index 3cdc6fd53fc7..05a756233f6a 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -61,6 +61,7 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
u16 *src_port = NULL, *dest_port = NULL;
struct airoha_foe_mac_info_common *l2;
unsigned char h_source[ETH_ALEN] = {};
+ struct airoha_foe_stats64 stats = {};
unsigned char h_dest[ETH_ALEN];
struct airoha_foe_entry *hwe;
u32 type, state, ib2, data;
@@ -144,14 +145,18 @@ static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
}
+ airoha_ppe_foe_entry_get_stats(ppe, i, &stats);
+
*((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
*((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
*((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
- " vlan=%d,%d ib1=%08x ib2=%08x\n",
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
h_source, h_dest, l2->etype, data,
- l2->vlan1, l2->vlan2, hwe->ib1, ib2);
+ l2->vlan1, l2->vlan2, hwe->ib1, ib2,
+ stats.packets, stats.bytes);
}
return 0;
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index 8146cde4e8ba..d931530fc96f 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -283,6 +283,7 @@
#define PPE_HASH_SEED 0x12345678
#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+#define DFT_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
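+/* One 4-bit default-cport field per index, e.g. DFT_CPORT_MASK(1)
+ * expands to GENMASK(7, 4).
+ */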
#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
@@ -422,11 +423,12 @@
((_n) == 2) ? 0x0720 : \
((_n) == 1) ? 0x0024 : 0x0020)
-#define REG_INT_ENABLE(_n) \
- (((_n) == 4) ? 0x0750 : \
- ((_n) == 3) ? 0x0744 : \
- ((_n) == 2) ? 0x0740 : \
- ((_n) == 1) ? 0x002c : 0x0028)
+#define REG_INT_ENABLE(_b, _n) \
+ (((_n) == 4) ? 0x0750 + ((_b) << 5) : \
+ ((_n) == 3) ? 0x0744 + ((_b) << 5) : \
+ ((_n) == 2) ? 0x0740 + ((_b) << 5) : \
+ ((_n) == 1) ? 0x002c + ((_b) << 3) : \
+ 0x0028 + ((_b) << 3))
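+
+/* Each irq bank (_b) has its own copy of the enable registers,
+ * strided 0x20 apart for INT_ENABLE{2,3,4} and 0x8 apart for
+ * INT_ENABLE{0,1}.
+ */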
/* QDMA_CSR_INT_ENABLE1 */
#define RX15_COHERENT_INT_MASK BIT(31)
@@ -461,6 +463,26 @@
#define IRQ0_FULL_INT_MASK BIT(1)
#define IRQ0_INT_MASK BIT(0)
+#define RX_COHERENT_LOW_INT_MASK \
+ (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
+ RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
+ RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \
+ RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \
+ RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \
+ RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \
+ RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \
+ RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
+
+#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK)
+#define INT_RX0_MASK(_n) \
+ (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
+
+#define TX_COHERENT_LOW_INT_MASK \
+ (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \
+ TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \
+ TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \
+ TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
+
#define TX_DONE_INT_MASK(_n) \
((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
: IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
@@ -469,17 +491,6 @@
(IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-#define INT_IDX0_MASK \
- (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
- TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
- TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
- TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
- RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
- RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
- RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
- RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
- RX15_COHERENT_INT_MASK | INT_TX_MASK)
-
/* QDMA_CSR_INT_ENABLE2 */
#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
@@ -514,19 +525,121 @@
#define RX1_DONE_INT_MASK BIT(1)
#define RX0_DONE_INT_MASK BIT(0)
-#define RX_DONE_INT_MASK \
- (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
- RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
- RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
- RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
- RX15_DONE_INT_MASK)
-#define INT_IDX1_MASK \
- (RX_DONE_INT_MASK | \
- RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
- RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
- RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
- RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
- RX15_NO_CPU_DSCP_INT_MASK)
+#define RX_NO_CPU_DSCP_LOW_INT_MASK \
+ (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
+ RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
+ RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \
+ RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \
+ RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \
+ RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \
+ RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \
+ RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_LOW_INT_MASK \
+ (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \
+ RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \
+ RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \
+ RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \
+ RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \
+ RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \
+ RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \
+ RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
+
+#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
+#define INT_RX1_MASK(_n) \
+ ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
+ (RX_DONE_LOW_INT_MASK & (_n)))
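+/* _n is a bitmask of rx queues 0-15: the same bits enable both the
+ * RX_DONE irqs (bits 0-15) and, shifted up, the NO_CPU_DSCP irqs
+ * (bits 16-31).
+ */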
+
+/* QDMA_CSR_INT_ENABLE3 */
+#define RX31_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX30_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX29_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX28_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX27_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX26_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX25_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX24_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX23_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX22_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX21_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX20_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX19_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX18_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX17_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX16_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX31_DONE_INT_MASK BIT(15)
+#define RX30_DONE_INT_MASK BIT(14)
+#define RX29_DONE_INT_MASK BIT(13)
+#define RX28_DONE_INT_MASK BIT(12)
+#define RX27_DONE_INT_MASK BIT(11)
+#define RX26_DONE_INT_MASK BIT(10)
+#define RX25_DONE_INT_MASK BIT(9)
+#define RX24_DONE_INT_MASK BIT(8)
+#define RX23_DONE_INT_MASK BIT(7)
+#define RX22_DONE_INT_MASK BIT(6)
+#define RX21_DONE_INT_MASK BIT(5)
+#define RX20_DONE_INT_MASK BIT(4)
+#define RX19_DONE_INT_MASK BIT(3)
+#define RX18_DONE_INT_MASK BIT(2)
+#define RX17_DONE_INT_MASK BIT(1)
+#define RX16_DONE_INT_MASK BIT(0)
+
+#define RX_NO_CPU_DSCP_HIGH_INT_MASK \
+ (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \
+ RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \
+ RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \
+ RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \
+ RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \
+ RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \
+ RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \
+ RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
+
+#define RX_DONE_HIGH_INT_MASK \
+ (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \
+ RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \
+ RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \
+ RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \
+ RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \
+ RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \
+ RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
+ RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
+
+#define RX_DONE_INT_MASK (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK)
+#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
+
+#define INT_RX2_MASK(_n) \
+ ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
+ (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
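+/* Same scheme for rx queues 16-31: _n carries the queue bits in its
+ * upper half, used directly for NO_CPU_DSCP and shifted down for
+ * RX_DONE.
+ */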
+
+/* QDMA_CSR_INT_ENABLE4 */
+#define RX31_COHERENT_INT_MASK BIT(31)
+#define RX30_COHERENT_INT_MASK BIT(30)
+#define RX29_COHERENT_INT_MASK BIT(29)
+#define RX28_COHERENT_INT_MASK BIT(28)
+#define RX27_COHERENT_INT_MASK BIT(27)
+#define RX26_COHERENT_INT_MASK BIT(26)
+#define RX25_COHERENT_INT_MASK BIT(25)
+#define RX24_COHERENT_INT_MASK BIT(24)
+#define RX23_COHERENT_INT_MASK BIT(23)
+#define RX22_COHERENT_INT_MASK BIT(22)
+#define RX21_COHERENT_INT_MASK BIT(21)
+#define RX20_COHERENT_INT_MASK BIT(20)
+#define RX19_COHERENT_INT_MASK BIT(19)
+#define RX18_COHERENT_INT_MASK BIT(18)
+#define RX17_COHERENT_INT_MASK BIT(17)
+#define RX16_COHERENT_INT_MASK BIT(16)
+
+#define RX_COHERENT_HIGH_INT_MASK \
+ (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \
+ RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \
+ RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \
+ RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \
+ RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \
+ RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK | \
+ RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \
+ RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
+
+#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n))
/* QDMA_CSR_INT_ENABLE5 */
#define TX31_COHERENT_INT_MASK BIT(31)
@@ -554,19 +667,19 @@
#define TX9_COHERENT_INT_MASK BIT(9)
#define TX8_COHERENT_INT_MASK BIT(8)
-#define INT_IDX4_MASK \
- (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
- TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
- TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
- TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
- TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
- TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
- TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
- TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
- TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
- TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
- TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
- TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
+#define TX_COHERENT_HIGH_INT_MASK \
+ (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \
+ TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \
+ TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \
+ TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \
+ TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \
+ TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \
+ TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \
+ TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \
+ TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \
+ TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \
+ TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \
+ TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
@@ -691,6 +804,12 @@
#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+#define RATE_LIMIT_PARAM_RW_MASK BIT(31)
+#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30)
+#define RATE_LIMIT_PARAM_TYPE_MASK GENMASK(29, 28)
+#define RATE_LIMIT_METER_GROUP_MASK GENMASK(27, 26)
+#define RATE_LIMIT_PARAM_INDEX_MASK GENMASK(23, 16)
+
#define REG_TXWRR_MODE_CFG 0x1020
#define TWRR_WEIGHT_SCALE_MASK BIT(31)
#define TWRR_WEIGHT_BASE_MASK BIT(3)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 897720fdf5d8..7d9ee9867a40 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1777,7 +1777,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
if (ENA_IS_XDP_INDEX(adapter, i))
napi_handler = ena_xdp_io_poll;
- netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
+ netif_napi_add_config(adapter->netdev, &napi->napi, napi_handler, i);
if (!ENA_IS_XDP_INDEX(adapter, i))
napi->rx_ring = rx_ring;
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index 506f682d15c1..097bb092bdb8 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -225,7 +225,7 @@ int pdsc_adminq_post(struct pdsc *pdsc,
union pds_core_adminq_comp *comp,
bool fast_poll)
{
- unsigned long poll_interval = 1;
+ unsigned long poll_interval = 200;	/* usecs */
unsigned long poll_jiffies;
unsigned long time_limit;
unsigned long time_start;
@@ -252,7 +252,7 @@ int pdsc_adminq_post(struct pdsc *pdsc,
time_limit = time_start + HZ * pdsc->devcmd_timeout;
do {
/* Timeslice the actual wait to catch IO errors etc early */
- poll_jiffies = msecs_to_jiffies(poll_interval);
+ poll_jiffies = usecs_to_jiffies(poll_interval);
remaining = wait_for_completion_timeout(wc, poll_jiffies);
if (remaining)
break;
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 9512aa4083f0..076dfe2910c7 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -402,6 +402,7 @@ err_out_uninit:
static struct pdsc_viftype pdsc_viftype_defaults[] = {
[PDS_DEV_TYPE_FWCTL] = { .name = PDS_DEV_TYPE_FWCTL_STR,
+ .enabled = true,
.vif_id = PDS_DEV_TYPE_FWCTL,
.dl_id = -1 },
[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
@@ -414,7 +415,8 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
{
enum pds_core_vif_types vt;
- pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
+ pdsc->viftype_status = kcalloc(ARRAY_SIZE(pdsc_viftype_defaults),
+ sizeof(*pdsc->viftype_status),
GFP_KERNEL);
if (!pdsc->viftype_status)
return -ENOMEM;
@@ -431,9 +433,6 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
/* See what the Core device has for support */
vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
- if (vt == PDS_DEV_TYPE_FWCTL)
- pdsc->viftype_status[vt].enabled = true;
-
dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
pdsc->viftype_status[vt].name,
vt_support ? "" : "not ");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 3b70f6737633..e1296cbf4ff3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1,117 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_COMMON_H__
@@ -900,6 +791,11 @@
#define PCS_V2_RV_WINDOW_SELECT 0x1064
#define PCS_V2_YC_WINDOW_DEF 0x18060
#define PCS_V2_YC_WINDOW_SELECT 0x18064
+#define PCS_V3_RN_WINDOW_DEF 0xf8078
+#define PCS_V3_RN_WINDOW_SELECT 0xf807c
+
+#define PCS_RN_SMN_BASE_ADDR 0x11e00000
+#define PCS_RN_PORT_ADDR_SIZE 0x100000
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index c68ace804e37..1474df5544fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b35808d3d07f..d9157c4acde9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/debugfs.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index d41b58fad37b..7c8a19988a52 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include "xgbe.h"
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f1b0fb02b3cd..466b5f6e5578 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/phy.h>
@@ -120,9 +11,11 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
+#include <linux/pci.h>
#include "xgbe.h"
#include "xgbe-common.h"
+#include "xgbe-smn.h"
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
@@ -1162,18 +1055,19 @@ static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
return 0;
}
-static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
- int mmd_reg)
+static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
+ int mmd_reg)
{
- unsigned long flags;
- unsigned int mmd_address, index, offset;
- int mmd_data;
-
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ return (mmd_reg & XGBE_ADDR_C45) ?
+ mmd_reg & ~XGBE_ADDR_C45 :
+ (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+}
+static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
+ unsigned int mmd_address,
+ unsigned int *index,
+ unsigned int *offset)
+{
/* The PCS registers are accessed using mmio. The underlying
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1184,8 +1078,98 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
* offset 1 bit and reading 16 bits of data.
*/
mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ *index = mmd_address & ~pdata->xpcs_window_mask;
+ *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+}
+
+static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ u32 smn_address;
+ int mmd_data;
+ int ret;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret)
+ return ret;
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
+ if (ret)
+ return ret;
+
+ mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
+ FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int pci_mmd_data, hi_mask, lo_mask;
+ unsigned int mmd_address, index, offset;
+ struct pci_dev *dev;
+ u32 smn_address;
+ int ret;
+
+ dev = pdata->pcidev;
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
+
+ smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to read data\n");
+ return;
+ }
+
+ if (offset % 4) {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
+ } else {
+ hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
+ FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
+ lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
+ }
+
+ pci_mmd_data = hi_mask | lo_mask;
+
+ ret = amd_smn_write(0, smn_address, index);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", index);
+ return;
+ }
+
+ ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
+ if (ret) {
+ pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
+ return;
+ }
+}
+
+static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ unsigned long flags;
+ int mmd_data;
+
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
+
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1201,23 +1185,9 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
- /* The PCS registers are accessed using mmio. The underlying
- * management interface uses indirect addressing to access the MMD
- * register sets. This requires accessing of the PCS register in two
- * phases, an address phase and a data phase.
- *
- * The mmio interface is based on 16-bit offsets and values. All
- * register offsets must therefore be adjusted by left shifting the
- * offset 1 bit and writing 16 bits of data.
- */
- mmd_address <<= 1;
- index = mmd_address & ~pdata->xpcs_window_mask;
- offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+ xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
@@ -1232,10 +1202,7 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1260,10 +1227,7 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & XGBE_ADDR_C45)
- mmd_address = mmd_reg & ~XGBE_ADDR_C45;
- else
- mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+ mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
@@ -1290,6 +1254,9 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
}
}
@@ -1300,6 +1267,9 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
case XGBE_XPCS_ACCESS_V1:
return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
+ case XGBE_XPCS_ACCESS_V3:
+ return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);
+
case XGBE_XPCS_ACCESS_V2:
default:
return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
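
The v3 accessors above route XPCS accesses through AMD's SMN interface (amd_smn_read()/amd_smn_write()) instead of direct MMIO, but the windowing arithmetic is the same as v2: write an index to select a register window, then address a 16-bit PCS register inside it. Because each 32-bit SMN word at smn_base + offset carries two adjacent 16-bit registers, offset % 4 decides whether the high or low half is the one addressed. The following is a minimal standalone sketch of that arithmetic in plain user-space C, not kernel code; XPCS_WINDOW and XPCS_WINDOW_MASK are hypothetical placeholders for the pdata->xpcs_window and pdata->xpcs_window_mask values the driver discovers at probe time.

#include <stdio.h>
#include <stdint.h>

#define XPCS_WINDOW      0x8000u  /* hypothetical window base */
#define XPCS_WINDOW_MASK 0x1fffu  /* hypothetical window mask */

/* PCS registers are 16 bits wide, so the byte address is mmd_address << 1;
 * the upper bits select the window (index), the rest land in the window. */
static void pcs_index_and_offset(uint32_t mmd_address,
                                 uint32_t *index, uint32_t *offset)
{
        mmd_address <<= 1;
        *index  = mmd_address & ~XPCS_WINDOW_MASK;
        *offset = XPCS_WINDOW + (mmd_address & XPCS_WINDOW_MASK);
}

/* v3 read: fetch a 32-bit SMN word, keep the addressed 16-bit half
 * (mirrors the FIELD_GET(XGBE_GEN_HI_MASK/LO_MASK, ...) selection). */
static uint16_t pick_half(uint32_t word, uint32_t offset)
{
        return (offset % 4) ? (uint16_t)(word >> 16)
                            : (uint16_t)(word & 0xffffu);
}

/* v3 write: read-modify-write, replacing one half and preserving the
 * other, as in xgbe_write_mmd_regs_v3() above. */
static uint32_t merge_half(uint32_t word, uint16_t data, uint32_t offset)
{
        return (offset % 4) ? (((uint32_t)data << 16) | (word & 0xffffu))
                            : ((word & 0xffff0000u) | data);
}

int main(void)
{
        uint32_t index, offset;

        /* MMD 1, register 1: mmd_address = (mdio_mmd << 16) | reg */
        pcs_index_and_offset((1u << 16) | 0x0001u, &index, &offset);
        printf("index=0x%x offset=0x%x half=%s\n",
               (unsigned)index, (unsigned)offset,
               (offset % 4) ? "high" : "low");
        printf("read  -> 0x%04x\n", (unsigned)pick_half(0xdead0001u, offset));
        printf("write -> 0x%08x\n",
               (unsigned)merge_half(0xdead0001u, 0xbeef, offset));
        return 0;
}

In the driver itself, the index is written to smn_base + xpcs_window_sel_reg via amd_smn_write() before each data access, and failures are reported with pci_err() rather than returned to the MDIO layer on the write path, since xgbe_write_mmd_regs() has a void return.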
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 8e09ad8fa022..76c328721fcd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 4431ab1c18b3..be0d2c7d08dc 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 7a833894f52a..d40011e8ddf2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 0e8698928e4d..4ebdd123c435 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 07f4f3418d01..71449edbb76d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index c636999a6a84..097ec5e4f261 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -1,123 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/log2.h>
+#include "xgbe-smn.h"
#include "xgbe.h"
#include "xgbe-common.h"
@@ -207,14 +99,14 @@ out:
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct xgbe_prv_data *pdata;
- struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
- struct pci_dev *rdev;
+ unsigned int port_addr_size, reg;
+ struct device *dev = &pdev->dev;
+ struct xgbe_prv_data *pdata;
unsigned int ma_lo, ma_hi;
- unsigned int reg;
- int bar_mask;
- int ret;
+ struct pci_dev *rdev;
+ int bar_mask, ret;
+ u32 address;
pdata = xgbe_alloc_pdata(dev);
if (IS_ERR(pdata)) {
@@ -274,20 +166,31 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the PCS indirect addressing definition registers */
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
- if (rdev &&
- (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
- pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
- } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
- (rdev->device == 0x14b5)) {
- pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
- pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
-
- /* Yellow Carp devices do not need cdr workaround */
- pdata->vdata->an_cdr_workaround = 0;
-
- /* Yellow Carp devices do not need rrc */
- pdata->vdata->enable_rrc = 0;
+ if (rdev && rdev->vendor == PCI_VENDOR_ID_AMD) {
+ switch (rdev->device) {
+ case XGBE_RV_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ break;
+ case XGBE_YC_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need cdr workaround */
+ pdata->vdata->an_cdr_workaround = 0;
+
+ /* Yellow Carp devices do not need rrc */
+ pdata->vdata->enable_rrc = 0;
+ break;
+ case XGBE_RN_PCI_DEVICE_ID:
+ pdata->xpcs_window_def_reg = PCS_V3_RN_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V3_RN_WINDOW_SELECT;
+ break;
+ default:
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ break;
+ }
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -295,7 +198,22 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_dev_put(rdev);
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ port_addr_size = PCS_RN_PORT_ADDR_SIZE *
+ XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ pdata->smn_base = PCS_RN_SMN_BASE_ADDR + port_addr_size;
+
+ address = pdata->smn_base + (pdata->xpcs_window_def_reg);
+ ret = amd_smn_read(0, address, &reg);
+ if (ret) {
+ pci_err(pdata->pcidev, "Failed to read PCS window definition\n");
+ goto err_pci_enable;
+ }
+ } else {
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ }
+
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
@@ -473,6 +391,22 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
+static struct xgbe_version_data xgbe_v3 = {
+ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = XGBE_XPCS_ACCESS_V3,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
+ .tx_desc_prefetch = 5,
+ .rx_desc_prefetch = 5,
+ .an_cdr_workaround = 0,
+ .enable_rrc = 0,
+};
+
static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
@@ -510,6 +444,8 @@ static const struct pci_device_id xgbe_pci_table[] = {
.driver_data = (kernel_ulong_t)&xgbe_v2a },
{ PCI_VDEVICE(AMD, 0x1459),
.driver_data = (kernel_ulong_t)&xgbe_v2b },
+ { PCI_VDEVICE(AMD, 0x1641),
+ .driver_data = (kernel_ulong_t)&xgbe_v3 },
/* Last entry must be zero */
{ 0, }
};
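The probe now separates three PCS access generations instead of chaining device-ID comparisons: Raven (XGBE_RV_PCI_DEVICE_ID) and Yellow Carp (XGBE_YC_PCI_DEVICE_ID) keep their V2 window registers, the new Renoir-class root complex (XGBE_RN_PCI_DEVICE_ID) selects the V3 definitions, and anything else falls back to the generic V2 window. On V3 parts the window-definition register is no longer reachable through memory-mapped indirect I/O; it sits behind the system management network, hence the amd_smn_read() call keyed off pdata->smn_base. A minimal sketch of the forked read path, condensed from the hunks above (the helper name xgbe_read_window_def is hypothetical; the macros and amd_smn_read() are the ones this patch introduces):

static int xgbe_read_window_def(struct xgbe_prv_data *pdata, u32 *reg)
{
        u32 address;

        if (pdata->vdata->xpcs_access == XGBE_XPCS_ACCESS_V3) {
                /* V3: the register sits behind the SMN mailbox */
                address = pdata->smn_base + pdata->xpcs_window_def_reg;
                return amd_smn_read(0, address, reg);
        }

        /* V1/V2: plain indirect MMIO read */
        *reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
        return 0;
}

Whichever path runs, the OFFSET and SIZE fields are still extracted with XPCS_GET_BITS(), so everything downstream of the window setup is unchanged. The new xgbe_v3 version data and the 0x1641 PCI ID entry tie the V3 access mode to the matching silicon.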
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
index d16eae415f72..2e6b8ffe785c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 268399dfcf22..7a4dfa4e19c7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4365bd62942c..47d53e59ccf6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 7051bd7cf6dc..978c4dd01fa0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -1,117 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#include <linux/clk.h>
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-smn.h b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
new file mode 100644
index 000000000000..c6ae127ced03
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-smn.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#ifndef __SMN_H__
+#define __SMN_H__
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd/nb.h>
+
+#else
+
+static inline int amd_smn_write(u16 node, u32 address, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+ return -ENODEV;
+}
+
+#endif
+#endif
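The new header exists so the PCI code builds whether or not the platform has AMD northbridge support: with CONFIG_AMD_NB enabled it pulls the real SMN accessors from asm/amd/nb.h, and without it the inline stubs turn every SMN access into -ENODEV, so a V3 device fails probe cleanly instead of breaking the link. A short caller-side sketch under that assumption (offset here is a placeholder for any SMN register offset, not a name from the patch):

        u32 val;
        int ret;

        ret = amd_smn_read(0, pdata->smn_base + offset, &val);
        if (ret)
                return ret;     /* -ENODEV when SMN support is absent */

This is the same error handling the xgbe-pci.c hunk uses, where a failed read jumps to err_pci_enable.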
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ed5d43c16d0e..6359bb87dc13 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1,117 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
*/
#ifndef __XGBE_H__
@@ -347,6 +238,15 @@
(_src)->link_modes._sname, \
__ETHTOOL_LINK_MODE_MASK_NBITS)
+/* XGBE PCI device IDs */
+#define XGBE_RV_PCI_DEVICE_ID 0x15d0
+#define XGBE_YC_PCI_DEVICE_ID 0x14b5
+#define XGBE_RN_PCI_DEVICE_ID 0x1630
+
+/* Generic high and low masks */
+#define XGBE_GEN_HI_MASK GENMASK(31, 16)
+#define XGBE_GEN_LO_MASK GENMASK(15, 0)
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -565,6 +465,7 @@ enum xgbe_speed {
enum xgbe_xpcs_access {
XGBE_XPCS_ACCESS_V1 = 0,
XGBE_XPCS_ACCESS_V2,
+ XGBE_XPCS_ACCESS_V3,
};
enum xgbe_an_mode {
@@ -1060,6 +961,7 @@ struct xgbe_prv_data {
struct device *dev;
struct platform_device *phy_platdev;
struct device *phy_dev;
+ unsigned int smn_base;
/* Version related data */
struct xgbe_version_data *vdata;
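Besides smn_base and the V3 access enum, xgbe.h gains named halves of a 32-bit register. The consumers are in hunks not shown here, presumably the SMN-based MAC address handling, but the intended pattern is the usual bitfield idiom; an illustrative sketch (FIELD_GET comes from <linux/bitfield.h>; the variable names are made up for the example):

        u32 reg = 0x12345678;                           /* example register value */
        u16 hi = FIELD_GET(XGBE_GEN_HI_MASK, reg);      /* 0x1234, bits 31:16 */
        u16 lo = FIELD_GET(XGBE_GEN_LO_MASK, reg);      /* 0x5678, bits 15:0 */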
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index b9fdd61f1fdb..b50052c25a91 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -20,7 +20,6 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
-#include <linux/crc32poly.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
@@ -796,59 +795,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
}
#ifndef SUNHME_MULTICAST
-/* Real fast bit-reversal algorithm, 6-bit values */
-static int reverse6[64] = {
- 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
- 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
- 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
- 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
- 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
- 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
- 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
- 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
-};
-
-static unsigned int
-crc416(unsigned int curval, unsigned short nxtval)
-{
- unsigned int counter, cur = curval, next = nxtval;
- int high_crc_set, low_data_set;
-
- /* Swap bytes */
- next = ((next & 0x00FF) << 8) | (next >> 8);
-
- /* Compute bit-by-bit */
- for (counter = 0; counter < 16; ++counter) {
- /* is high CRC bit set? */
- if ((cur & 0x80000000) == 0) high_crc_set = 0;
- else high_crc_set = 1;
-
- cur = cur << 1;
-
- if ((next & 0x0001) == 0) low_data_set = 0;
- else low_data_set = 1;
-
- next = next >> 1;
-
- /* do the XOR */
- if (high_crc_set ^ low_data_set) cur = cur ^ CRC32_POLY_BE;
- }
- return cur;
-}
-
-static unsigned int
-bmac_crc(unsigned short *address)
-{
- unsigned int newcrc;
-
- XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
- newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
- newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
- newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
-
- return(newcrc);
-}
-
/*
* Add requested mcast addr to BMac's hash table filter.
*
@@ -861,8 +807,7 @@ bmac_addhash(struct bmac_data *bp, unsigned char *addr)
unsigned short mask;
if (!(*addr)) return;
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc]++) return; /* This bit is already set */
mask = crc % 16;
mask = (unsigned char)1 << mask;
@@ -876,8 +821,7 @@ bmac_removehash(struct bmac_data *bp, unsigned char *addr)
unsigned char mask;
/* Now, delete the address from the filter copy, as indicated */
- crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
- crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
+ crc = crc32(~0, addr, ETH_ALEN) >> 26;
if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
mask = crc % 16;
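The bmac change is more than license cleanup: the driver's multicast hash previously fed the MAC address through a hand-rolled MSB-first CRC-32 (crc416()/bmac_crc()) and then bit-reversed the low six bits through the reverse6[] table. The kernel's crc32() from <linux/crc32.h> (already included above) computes the reflected, LSB-first CRC-32, which is the bit-mirror of that loop, so the top six bits of crc32(~0, addr, ETH_ALEN) are exactly the reverse6[bmac_crc(addr) & 0x3f] value the old code produced. The whole computation collapses to one expression; as a sketch, with bmac_hash a name chosen here for illustration:

static unsigned int bmac_hash(const unsigned char *addr)
{
        /* Reflected CRC-32 over the 6-byte MAC; hash is the top 6 bits */
        return crc32(~0, addr, ETH_ALEN) >> 26;
}

bmac_addhash() and bmac_removehash() keep indexing the same 64-entry hash_use_count[] table with the result, so filter behavior is unchanged.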
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index c1d1673c5749..b565189e5913 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
}
#endif
- skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index bf3aa46887a1..e71cd10e4e1f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
frags = aq_nic_map_skb(self, skb, ring);
+ skb_tx_timestamp(skb);
+
if (likely(frags)) {
err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
ring, frags);
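In the atlantic driver the software TX timestamp moves from the ndo_start_xmit wrapper into aq_nic_xmit(), after the skb has been mapped for transmission and just before the descriptors are handed to hardware, so a SOF_TIMESTAMPING_TX_SOFTWARE stamp is taken as late as the driver can manage. Reassembled from the two hunks, the resulting shape is roughly:

        frags = aq_nic_map_skb(self, skb, ring);

        skb_tx_timestamp(skb);  /* software TX stamp, right before hand-off */

        if (likely(frags))
                err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
                                                       ring, frags);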
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 1bd4313215d7..636520bb4b8c 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -123,6 +123,7 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select CRC32
select PHYLIB
help
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index a68fab1b05f0..fd35f4b4dc50 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -141,7 +141,7 @@ void bcmasp_flush_rx_port(struct bcmasp_intf *intf)
return;
}
- rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush);
+ rx_ctrl_core_wl(priv, mask, ASP_RX_CTRL_FLUSH);
}
static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
@@ -518,7 +518,7 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
int ret, i;
/* Write all filters to HW */
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
/* If the filter does not match the port, skip programming. */
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
@@ -551,7 +551,7 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -577,7 +577,7 @@ int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
struct bcmasp_priv *priv = intf->parent;
int cnt = 0, i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -602,7 +602,7 @@ bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf,
size_t fs_size = 0;
int i;
- for (i = 0; i < NUM_NET_FILTERS; i++) {
+ for (i = 0; i < priv->num_net_filters; i++) {
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -670,7 +670,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
int i, open_index = -1;
/* Check whether we exceed the filter table capacity */
- if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS)
+ if (loc != RX_CLS_LOC_ANY && loc >= priv->num_net_filters)
return ERR_PTR(-EINVAL);
/* If the filter location is busy (already claimed) and we are initializing
@@ -686,7 +686,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
/* Initialize the loop index based on the desired location or from 0 */
i = loc == RX_CLS_LOC_ANY ? 0 : loc;
- for ( ; i < NUM_NET_FILTERS; i++) {
+ for ( ; i < priv->num_net_filters; i++) {
/* Found matching network filter */
if (!init &&
priv->net_filters[i].claimed &&
@@ -779,7 +779,7 @@ static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en,
priv->mda_filters[i].en = en;
priv->mda_filters[i].port = intf->port;
- rx_filter_core_wl(priv, ((intf->channel + 8) |
+ rx_filter_core_wl(priv, ((intf->channel + priv->tx_chan_offset) |
(en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) |
ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)),
ASP_RX_FILTER_MDA_CFG(i));
@@ -865,7 +865,7 @@ void bcmasp_disable_all_filters(struct bcmasp_intf *intf)
res_count = bcmasp_total_res_mda_cnt(intf->parent);
/* Disable all filters held by this port */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en &&
priv->mda_filters[i].port == intf->port)
bcmasp_en_mda_filter(intf, 0, i);
@@ -909,7 +909,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
res_count = bcmasp_total_res_mda_cnt(intf->parent);
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
/* If filter not enabled or belongs to another port skip */
if (!priv->mda_filters[i].en ||
priv->mda_filters[i].port != intf->port)
@@ -924,7 +924,7 @@ int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr,
}
/* Create new filter if possible */
- for (i = res_count; i < NUM_MDA_FILTERS; i++) {
+ for (i = res_count; i < priv->num_mda_filters; i++) {
if (priv->mda_filters[i].en)
continue;
@@ -944,12 +944,12 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* Disable all filters and reset software view since the HW
* can lose context while in deep sleep suspend states
*/
- for (i = 0; i < NUM_MDA_FILTERS; i++) {
+ for (i = 0; i < priv->num_mda_filters; i++) {
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i));
priv->mda_filters[i].en = 0;
}
- for (i = 0; i < NUM_NET_FILTERS; i++)
+ for (i = 0; i < priv->num_net_filters; i++)
rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i));
/* Top level filter enable bit should be enabled at all times, set
@@ -966,18 +966,8 @@ static void bcmasp_core_init_filters(struct bcmasp_priv *priv)
/* ASP core initialization */
static void bcmasp_core_init(struct bcmasp_priv *priv)
{
- tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL);
- rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL);
-
- rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT),
- ASP_EDPKT_HDR_CFG);
- rx_edpkt_core_wl(priv,
- (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT),
- ASP_EDPKT_ENDI);
-
rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT);
rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT);
- rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT);
rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE);
@@ -1020,6 +1010,18 @@ static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT);
}
+static void bcmasp_core_clock_select_one_ctrl2(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+}
+
static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set)
{
u32 reg;
@@ -1108,7 +1110,7 @@ static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i)
return irq;
}
-static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
+static void bcmasp_init_wol(struct bcmasp_priv *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
@@ -1125,7 +1127,7 @@ static void bcmasp_init_wol_shared(struct bcmasp_priv *priv)
device_set_wakeup_capable(&pdev->dev, 1);
}
-static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en)
{
struct bcmasp_priv *priv = intf->parent;
struct device *dev = &priv->pdev->dev;
@@ -1154,54 +1156,12 @@ static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en)
}
}
-static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv)
+static void bcmasp_wol_irq_destroy(struct bcmasp_priv *priv)
{
if (priv->wol_irq > 0)
free_irq(priv->wol_irq, priv);
}
-static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv)
-{
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
- struct bcmasp_intf *intf;
- int irq;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- irq = bcmasp_get_and_request_irq(priv, intf->port + 1);
- if (irq < 0) {
- dev_warn(dev, "Failed to init WoL irq(port %d): %d\n",
- intf->port, irq);
- continue;
- }
-
- intf->wol_irq = irq;
- intf->wol_irq_enabled = false;
- device_set_wakeup_capable(&pdev->dev, 1);
- }
-}
-
-static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en)
-{
- struct device *dev = &intf->parent->pdev->dev;
-
- if (en ^ intf->wol_irq_enabled)
- irq_set_irq_wake(intf->wol_irq, en);
-
- intf->wol_irq_enabled = en;
- device_set_wakeup_enable(dev, en);
-}
-
-static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
-{
- struct bcmasp_intf *intf;
-
- list_for_each_entry(intf, &priv->intfs, list) {
- if (intf->wol_irq > 0)
- free_irq(intf->wol_irq, priv);
- }
-}
-
static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
{
u32 reg, phy_lpi_overwrite;
@@ -1220,70 +1180,53 @@ static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
usleep_range(50, 100);
}
-static struct bcmasp_hw_info v20_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
- .umac2fb = UMAC2FB_OFFSET,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT,
- .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH,
-};
-
-static const struct bcmasp_plat_data v20_plat_data = {
- .init_wol = bcmasp_init_wol_per_intf,
- .enable_wol = bcmasp_enable_wol_per_intf,
- .destroy_wol = bcmasp_wol_irq_destroy_per_intf,
- .core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v20_hw_info,
-};
-
-static struct bcmasp_hw_info v21_hw_info = {
- .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1,
- .umac2fb = UMAC2FB_OFFSET_2_1,
- .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_filt_out_frame_count =
- ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1,
- .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1,
-};
-
static const struct bcmasp_plat_data v21_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_one,
- .hw_info = &v21_hw_info,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
};
static const struct bcmasp_plat_data v22_plat_data = {
- .init_wol = bcmasp_init_wol_shared,
- .enable_wol = bcmasp_enable_wol_shared,
- .destroy_wol = bcmasp_wol_irq_destroy_shared,
.core_clock_select = bcmasp_core_clock_select_many,
- .hw_info = &v21_hw_info,
.eee_fixup = bcmasp_eee_fixup,
+ .num_mda_filters = 32,
+ .num_net_filters = 32,
+ .tx_chan_offset = 8,
+ .rx_ctrl_offset = 0x0,
+};
+
+static const struct bcmasp_plat_data v30_plat_data = {
+ .core_clock_select = bcmasp_core_clock_select_one_ctrl2,
+ .num_mda_filters = 20,
+ .num_net_filters = 16,
+ .tx_chan_offset = 0,
+ .rx_ctrl_offset = 0x10000,
};
static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
{
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
priv->core_clock_select = pdata->core_clock_select;
priv->eee_fixup = pdata->eee_fixup;
- priv->hw_info = pdata->hw_info;
+ priv->num_mda_filters = pdata->num_mda_filters;
+ priv->num_net_filters = pdata->num_net_filters;
+ priv->tx_chan_offset = pdata->tx_chan_offset;
+ priv->rx_ctrl_offset = pdata->rx_ctrl_offset;
}
static const struct of_device_id bcmasp_of_match[] = {
- { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
{ .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
+ { .compatible = "brcm,asp-v3.0", .data = &v30_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
- { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
- { .compatible = "brcm,asp-v2.0-mdio", },
+ { .compatible = "brcm,asp-v2.2-mdio", },
+ { .compatible = "brcm,asp-v3.0-mdio", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match);
@@ -1365,6 +1308,17 @@ static int bcmasp_probe(struct platform_device *pdev)
* how many interfaces come up.
*/
bcmasp_core_init(priv);
+
+ priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters,
+ sizeof(*priv->mda_filters), GFP_KERNEL);
+ if (!priv->mda_filters)
+ return -ENOMEM;
+
+ priv->net_filters = devm_kcalloc(dev, priv->num_net_filters,
+ sizeof(*priv->net_filters), GFP_KERNEL);
+ if (!priv->net_filters)
+ return -ENOMEM;
+
bcmasp_core_init_filters(priv);
ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
@@ -1387,7 +1341,7 @@ static int bcmasp_probe(struct platform_device *pdev)
}
/* Check and enable WoL */
- priv->init_wol(priv);
+ bcmasp_init_wol(priv);
/* Drop the clock reference count now and let ndo_open()/ndo_close()
* manage it for us from now on.
@@ -1404,7 +1358,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (ret) {
netdev_err(intf->ndev,
"failed to register net_device: %d\n", ret);
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
goto of_put_exit;
}
@@ -1425,7 +1379,7 @@ static void bcmasp_remove(struct platform_device *pdev)
if (!priv)
return;
- priv->destroy_wol(priv);
+ bcmasp_wol_irq_destroy(priv);
bcmasp_remove_intfs(priv);
}
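
The bcmasp rework above replaces the per-revision bcmasp_hw_info structure and the WoL function pointers with plain integers (filter counts, channel and register offsets) carried in bcmasp_plat_data and selected through the of_device_id table. A minimal sketch of that pattern with hypothetical my_* names; only device_get_match_data() and the OF table shape are real kernel API here:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct my_plat_data {			/* hypothetical, mirrors the idea only */
	unsigned int num_net_filters;
	unsigned int rx_ctrl_offset;
};

static const struct my_plat_data my_v3_data = {
	.num_net_filters = 16,
	.rx_ctrl_offset	 = 0x10000,
};

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,block-v3.0", .data = &my_v3_data },
	{ /* sentinel */ },
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_plat_data *pdata = device_get_match_data(&pdev->dev);

	if (!pdata)
		return -EINVAL;

	/* copy the scalars into the private struct; no per-version
	 * callbacks or parallel register maps are needed when the
	 * differences are simple integers
	 */
	return 0;
}

Scalar parameters keep the v2.1/v2.2/v3.0 variants on one code path where the old design needed a callback or a separate hw_info table per revision.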
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index 8fc75bcedb70..74adfdb50e11 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -53,22 +53,15 @@
#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14
#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18
#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c
-/* asp2.1 diverges offsets here */
-/* ASP2.0 */
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24
-#define ASP_RX_CTRL_FLUSH 0x28
-#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
-#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
-#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30
-/* ASP2.1 */
-#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20
-#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24
-#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28
-#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c
-#define ASP_RX_CTRL_FLUSH_2_1 0x30
-#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38
+#define ASP_RX_CTRL_FB_9_FRAME_COUNT 0x20
+#define ASP_RX_CTRL_FB_10_FRAME_COUNT 0x24
+#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x28
+#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x2c
+#define ASP_RX_CTRL_FLUSH 0x30
+#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12))
+#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13))
+#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20))
+#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x38
#define ASP_RX_FILTER_OFFSET 0x80000
#define ASP_RX_FILTER_BLK_CTRL 0x0
@@ -345,9 +338,6 @@ struct bcmasp_intf {
u32 wolopts;
u8 sopass[SOPASS_MAX];
- /* Used if per intf wol irq */
- int wol_irq;
- unsigned int wol_irq_enabled:1;
};
#define NUM_NET_FILTERS 32
@@ -370,21 +360,13 @@ struct bcmasp_mda_filter {
u8 mask[ETH_ALEN];
};
-struct bcmasp_hw_info {
- u32 rx_ctrl_flush;
- u32 umac2fb;
- u32 rx_ctrl_fb_out_frame_count;
- u32 rx_ctrl_fb_filt_out_frame_count;
- u32 rx_ctrl_fb_rx_fifo_depth;
-};
-
struct bcmasp_plat_data {
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
- struct bcmasp_hw_info *hw_info;
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
};
struct bcmasp_priv {
@@ -399,18 +381,18 @@ struct bcmasp_priv {
int wol_irq;
unsigned long wol_irq_enabled_mask;
- void (*init_wol)(struct bcmasp_priv *priv);
- void (*enable_wol)(struct bcmasp_intf *intf, bool en);
- void (*destroy_wol)(struct bcmasp_priv *priv);
void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
+ unsigned int num_mda_filters;
+ unsigned int num_net_filters;
+ unsigned int tx_chan_offset;
+ unsigned int rx_ctrl_offset;
void __iomem *base;
- struct bcmasp_hw_info *hw_info;
struct list_head intfs;
- struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS];
+ struct bcmasp_mda_filter *mda_filters;
/* MAC destination address filters lock */
spinlock_t mda_lock;
@@ -418,7 +400,7 @@ struct bcmasp_priv {
/* Protects accesses to ASP_CTRL_CLOCK_CTRL */
spinlock_t clk_lock;
- struct bcmasp_net_filter net_filters[NUM_NET_FILTERS];
+ struct bcmasp_net_filter *net_filters;
/* Network filter lock */
struct mutex net_lock;
@@ -508,8 +490,8 @@ BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg);
#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21)
#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19)
#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16)
-#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15)
-#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14)
+#define PKT_OFFLOAD_EPKT_CSUM_L4 BIT(15)
+#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(14)
#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12)
#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10)
#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8)
@@ -541,12 +523,27 @@ BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET);
BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
-BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
+#define BCMASP_CORE_IO_MACRO_OFFSET(name, offset) \
+static inline u32 name##_core_rl(struct bcmasp_priv *priv, \
+ u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + priv->name##_offset + \
+ (offset) + off); \
+ return reg; \
+} \
+static inline void name##_core_wl(struct bcmasp_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + priv->name##_offset + \
+ (offset) + off); \
+}
+BCMASP_CORE_IO_MACRO_OFFSET(rx_ctrl, ASP_RX_CTRL_OFFSET);
+
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -599,4 +596,5 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
+void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en);
#endif
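
BCMASP_CORE_IO_MACRO_OFFSET() above differs from BCMASP_CORE_IO_MACRO() only in adding a per-SoC runtime offset on top of the compile-time block offset. Written out by hand, the rx_ctrl instantiation expands to roughly:

/* Hand expansion of BCMASP_CORE_IO_MACRO_OFFSET(rx_ctrl, ASP_RX_CTRL_OFFSET):
 * priv->rx_ctrl_offset (0x0 on v2.x, 0x10000 on v3.0) is applied at run
 * time on top of the fixed ASP_RX_CTRL_OFFSET.
 */
static inline u32 rx_ctrl_core_rl(struct bcmasp_priv *priv, u32 off)
{
	return readl_relaxed(priv->base + priv->rx_ctrl_offset +
			     ASP_RX_CTRL_OFFSET + off);
}

static inline void rx_ctrl_core_wl(struct bcmasp_priv *priv, u32 val, u32 off)
{
	writel_relaxed(val, priv->base + priv->rx_ctrl_offset +
		       ASP_RX_CTRL_OFFSET + off);
}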
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index a537c121d3e2..4381a4cfd8c6 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -10,7 +10,6 @@
#include "bcmasp_intf_defs.h"
enum bcmasp_stat_type {
- BCMASP_STAT_RX_EDPKT,
BCMASP_STAT_RX_CTRL,
BCMASP_STAT_RX_CTRL_PER_INTF,
BCMASP_STAT_SOFT,
@@ -33,8 +32,6 @@ struct bcmasp_stats {
.reg_offset = offset, \
}
-#define STAT_BCMASP_RX_EDPKT(str, offset) \
- STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset)
#define STAT_BCMASP_RX_CTRL(str, offset) \
STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset)
#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \
@@ -42,11 +39,6 @@ struct bcmasp_stats {
/* Must match the order of struct bcmasp_mib_counters */
static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
- /* EDPKT counters */
- STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER),
- STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT),
- STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT),
/* ASP RX control */
STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac",
ASP_RX_CTRL_UMAC_0_FRAME_COUNT),
@@ -71,23 +63,6 @@ static const struct bcmasp_stats bcmasp_gstrings_stats[] = {
#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats)
-static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf,
- const struct bcmasp_stats *s)
-{
- struct bcmasp_priv *priv = intf->parent;
-
- if (!strcmp("Frames Out(Buffer)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_out_frame_count;
-
- if (!strcmp("Frames Out(Filters)", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_filt_out_frame_count;
-
- if (!strcmp("RX Buffer FIFO Depth", s->stat_string))
- return priv->hw_info->rx_ctrl_fb_rx_fifo_depth;
-
- return s->reg_offset;
-}
-
static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
@@ -126,13 +101,10 @@ static void bcmasp_update_mib_counters(struct bcmasp_intf *intf)
char *p;
s = &bcmasp_gstrings_stats[i];
- offset = bcmasp_stat_fixup_offset(intf, s);
+ offset = s->reg_offset;
switch (s->type) {
case BCMASP_STAT_SOFT:
continue;
- case BCMASP_STAT_RX_EDPKT:
- val = rx_edpkt_core_rl(intf->parent, offset);
- break;
case BCMASP_STAT_RX_CTRL:
val = rx_ctrl_core_rl(intf->parent, offset);
break;
@@ -215,7 +187,7 @@ static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass));
mutex_lock(&priv->wol_lock);
- priv->enable_wol(intf, !!intf->wolopts);
+ bcmasp_enable_wol(intf, !!intf->wolopts);
mutex_unlock(&priv->wol_lock);
return 0;
@@ -289,7 +261,7 @@ static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd)
memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs));
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
return 0;
}
@@ -336,7 +308,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLALL:
err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
- cmd->data = NUM_NET_FILTERS;
+ cmd->data = intf->parent->num_net_filters;
break;
default:
err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 45ec1a9214a2..0d61b8580d72 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -180,14 +180,14 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
case htons(ETH_P_IP):
header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(0);
ip_proto = ip_hdr(skb)->protocol;
header_cnt += 2;
break;
case htons(ETH_P_IPV6):
header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
- epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
+ epkt |= PKT_OFFLOAD_EPKT_IP(1);
ip_proto = ipv6_hdr(skb)->nexthdr;
header_cnt += 2;
break;
@@ -198,12 +198,12 @@ static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
switch (ip_proto) {
case IPPROTO_TCP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
- epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
case IPPROTO_UDP:
header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
- epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
+ epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
header_cnt++;
break;
default:
@@ -818,9 +818,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
/* Tx SPB */
tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
TX_SPB_CTRL_XF_CTRL2);
- tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
- tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
@@ -1185,7 +1183,7 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
/* Per port */
intf->res.umac = priv->base + UMC_OFFSET(intf);
- intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
+ intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset +
(intf->port * 0x4));
intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
@@ -1200,7 +1198,6 @@ static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}
-#define MAX_IRQ_STR_LEN 64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i)
{
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
index ad742612895f..af7418348e81 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h
@@ -118,8 +118,7 @@
#define UMC_PSW_MS 0x624
#define UMC_PSW_LS 0x628
-#define UMAC2FB_OFFSET_2_1 0x9f044
-#define UMAC2FB_OFFSET 0x9f03c
+#define UMAC2FB_OFFSET 0x9f044
#define UMAC2FB_CFG 0x0
#define UMAC2FB_CFG_OPUT_EN BIT(0)
#define UMAC2FB_CFG_VLAN_EN BIT(1)
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a461ec612e95..3e9c57196a39 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1446,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
struct phy_device *phy_dev;
int err;
- phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phy_dev = fixed_phy_register(&fphy_status, NULL);
if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return PTR_ERR(phy_dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 86a5de44b6f3..d5495762c945 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -893,9 +893,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
-static bool bnxt_separate_head_pool(void)
+static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
- return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
+ return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -919,6 +919,20 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
return page;
}
+static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem);
+ return netmem;
+}
+
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
@@ -999,21 +1013,19 @@ static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
return next;
}
-static inline int bnxt_alloc_rx_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- u16 prod, gfp_t gfp)
+static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
- struct page *page;
- dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
+ dma_addr_t mapping;
+ netmem_ref netmem;
- page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
-
- if (!page)
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
+ if (!netmem)
return -ENOMEM;
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -1023,7 +1035,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
- rx_agg_buf->page = page;
+ rx_agg_buf->netmem = netmem;
rx_agg_buf->offset = offset;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1067,11 +1079,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
- u16 cons;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_agg_cmp *agg;
struct rx_bd *prod_bd;
- struct page *page;
+ netmem_ref netmem;
+ u16 cons;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
@@ -1088,11 +1100,11 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
cons_rx_buf = &rxr->rx_agg_ring[cons];
/* It is possible for sw_prod to be equal to cons, so
- * set cons_rx_buf->page to NULL first.
+ * set cons_rx_buf->netmem to 0 first.
*/
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
- prod_rx_buf->page = page;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
+ prod_rx_buf->netmem = netmem;
prod_rx_buf->offset = cons_rx_buf->offset;
prod_rx_buf->mapping = cons_rx_buf->mapping;
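
In the bnxt conversion above, netmem_ref is an opaque buffer handle (a page, or unreadable provider memory such as devmem), and 0 takes over the role NULL played for struct page *. A minimal sketch of the clear-then-assign reuse step, with hypothetical types:

#include <linux/types.h>
#include <net/netmem.h>

struct my_agg_buf {			/* hypothetical agg-ring slot */
	netmem_ref netmem;
	unsigned int offset;
	dma_addr_t mapping;
};

static void my_reuse_agg_buf(struct my_agg_buf *cons, struct my_agg_buf *prod)
{
	netmem_ref netmem = cons->netmem;

	cons->netmem = 0;	/* clear first: cons and prod may be the same slot */
	prod->netmem = netmem;
	prod->offset = cons->offset;
	prod->mapping = cons->mapping;
}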
@@ -1218,29 +1230,35 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct skb_shared_info *shinfo,
- u16 idx, u32 agg_bufs, bool tpa,
- struct xdp_buff *xdp)
+static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct sk_buff *skb,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u16 prod = rxr->rx_agg_prod;
+ struct skb_shared_info *shinfo;
+ struct bnxt_rx_ring_info *rxr;
u32 i, total_frag_len = 0;
bool p5_tpa = false;
+ u16 prod;
+
+ rxr = bnapi->rx_ring;
+ prod = rxr->rx_agg_prod;
if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
+ if (skb)
+ shinfo = skb_shinfo(skb);
+ else
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+
for (i = 0; i < agg_bufs; i++) {
- skb_frag_t *frag = &shinfo->frags[i];
- u16 cons, frag_len;
- struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
- struct page *page;
- dma_addr_t mapping;
+ struct rx_agg_cmp *agg;
+ u16 cons, frag_len;
+ netmem_ref netmem;
if (p5_tpa)
agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
@@ -1251,27 +1269,41 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_frag_fill_page_desc(frag, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
- shinfo->nr_frags = i + 1;
+ if (skb) {
+ skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len, BNXT_RX_PAGE_SIZE);
+ } else {
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem,
+ cons_rx_buf->offset,
+ frag_len);
+ shinfo->nr_frags = i + 1;
+ }
__clear_bit(cons, rxr->rx_agg_bmap);
- /* It is possible for bnxt_alloc_rx_page() to allocate
+ /* It is possible for bnxt_alloc_rx_netmem() to allocate
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
- mapping = cons_rx_buf->mapping;
- page = cons_rx_buf->page;
- cons_rx_buf->page = NULL;
+ netmem = cons_rx_buf->netmem;
+ cons_rx_buf->netmem = 0;
- if (xdp && page_is_pfmemalloc(page))
+ if (xdp && netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) {
+ if (skb) {
+ skb->len -= frag_len;
+ skb->data_len -= frag_len;
+ skb->truesize -= BNXT_RX_PAGE_SIZE;
+ }
+
--shinfo->nr_frags;
- cons_rx_buf->page = page;
+ cons_rx_buf->netmem = netmem;
- /* Update prod since possibly some pages have been
+ /* Update prod since possibly some netmems have been
* allocated already.
*/
rxr->rx_agg_prod = prod;
@@ -1279,8 +1311,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return 0;
}
- dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- bp->rx_dir);
+ page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
+ BNXT_RX_PAGE_SIZE);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -1289,32 +1321,28 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
return total_frag_len;
}
-static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
u32 total_frag_len = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
- agg_bufs, tpa, NULL);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ skb, NULL);
if (!total_frag_len) {
skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
- skb->data_len += total_frag_len;
- skb->len += total_frag_len;
- skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
return skb;
}
-static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct xdp_buff *xdp, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
u32 total_frag_len = 0;
@@ -1322,8 +1350,8 @@ static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
if (!xdp_buff_has_frags(xdp))
shinfo->nr_frags = 0;
- total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
- idx, agg_bufs, tpa, xdp);
+ total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa,
+ NULL, xdp);
if (total_frag_len) {
xdp_buff_set_frags_flag(xdp);
shinfo->nr_frags = agg_bufs;
@@ -1895,7 +1923,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs,
+ true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
cpr->sw_stats->rx.rx_oom_discards += 1;
@@ -2176,9 +2205,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (bnxt_xdp_attached(bp, rxr)) {
bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
if (agg_bufs) {
- u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
- cp_cons, agg_bufs,
- false);
+ u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp,
+ cp_cons,
+ agg_bufs,
+ false);
if (!frag_len)
goto oom_next_rx;
@@ -2230,7 +2260,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (agg_bufs) {
if (!xdp_active) {
- skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons,
+ agg_bufs, false);
if (!skb)
goto oom_next_rx;
} else {
@@ -3449,15 +3480,15 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
- struct page *page = rx_agg_buf->page;
+ netmem_ref netmem = rx_agg_buf->netmem;
- if (!page)
+ if (!netmem)
continue;
- rx_agg_buf->page = NULL;
+ rx_agg_buf->netmem = 0;
__clear_bit(i, rxr->rx_agg_bmap);
- page_pool_recycle_direct(rxr->page_pool, page);
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
}
}
@@ -3750,7 +3781,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_destroy(rxr->head_pool);
rxr->page_pool = rxr->head_pool = NULL;
@@ -3781,15 +3812,19 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
- pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
return PTR_ERR(pool);
rxr->page_pool = pool;
- if (bnxt_separate_head_pool()) {
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnxt_separate_head_pool(rxr)) {
pp.pool_size = max(bp->rx_ring_size, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pool = page_pool_create(&pp);
if (IS_ERR(pool))
goto err_destroy_pp;
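
The page-pool hunk above opts the aggregation pool into unreadable netmem (PP_FLAG_ALLOW_UNREADABLE_NETMEM plus a queue index for memory-provider binding); since packet headers must stay CPU-readable, a second plain pool is created whenever page_pool_is_unreadable() reports that the main one may hand out unreadable buffers. A condensed sketch of that split, with a hypothetical my_rxq type:

#include <linux/err.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

struct my_rxq {				/* hypothetical receive queue */
	unsigned int index;
	bool need_head_pool;
	struct page_pool *pool;
	struct page_pool *head_pool;
};

static int my_create_rx_pools(struct my_rxq *rxq, struct page_pool_params *pp)
{
	pp->flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
		    PP_FLAG_ALLOW_UNREADABLE_NETMEM;
	pp->queue_idx = rxq->index;
	rxq->pool = page_pool_create(pp);
	if (IS_ERR(rxq->pool))
		return PTR_ERR(rxq->pool);

	rxq->need_head_pool = page_pool_is_unreadable(rxq->pool);
	if (!rxq->need_head_pool) {
		rxq->head_pool = rxq->pool;	/* one readable pool suffices */
		return 0;
	}

	pp->flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;	/* readable only */
	rxq->head_pool = page_pool_create(pp);
	if (IS_ERR(rxq->head_pool)) {
		page_pool_destroy(rxq->pool);
		return PTR_ERR(rxq->head_pool);
	}
	return 0;
}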
@@ -4201,6 +4236,8 @@ static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
rxr->page_pool->p.napi = NULL;
rxr->page_pool = NULL;
+ rxr->head_pool->p.napi = NULL;
+ rxr->head_pool = NULL;
memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
ring = &rxr->rx_ring_struct;
@@ -4325,16 +4362,16 @@ static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
rxr->rx_prod = prod;
}
-static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr,
- int ring_nr)
+static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
u32 prod;
int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
+ if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -4375,7 +4412,7 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
+ bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr);
if (rxr->rx_tpa) {
rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
@@ -10077,7 +10114,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input *req;
u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc, len;
+ int rc, len, max_tmo_secs;
rc = hwrm_req_init(bp, req, HWRM_VER_GET);
if (rc)
@@ -10150,9 +10187,14 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
if (!bp->hwrm_cmd_max_timeout)
bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
- else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
- netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
- bp->hwrm_cmd_max_timeout / 1000);
+ max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000;
+#ifdef CONFIG_DETECT_HUNG_TASK
+ if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT ||
+ max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) {
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n",
+ max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT);
+ }
+#endif
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -14013,13 +14055,28 @@ static void bnxt_unlock_sp(struct bnxt *bp)
netdev_unlock(bp->dev);
}
+/* Same as bnxt_lock_sp(), but additionally takes the rtnl_lock */
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+{
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
+ netdev_lock(bp->dev);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ netdev_unlock(bp->dev);
+ rtnl_unlock();
+}
+
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_lock_sp(bp);
+ bnxt_rtnl_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_unlock_sp(bp);
+ bnxt_rtnl_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
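
bnxt_rtnl_lock_sp() above nests the locks in the conventional order, RTNL outermost, then the per-netdev instance lock, and drops them in reverse. A small sketch of the pattern the sp-task paths now follow (my_reconfigure() stands in for work such as bnxt_reset_task()):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void my_reconfigure(struct net_device *dev);	/* hypothetical */

static void my_locked_reset(struct net_device *dev)
{
	rtnl_lock();		/* outer lock */
	netdev_lock(dev);	/* per-netdev instance lock, inner */

	my_reconfigure(dev);	/* runs under both locks */

	netdev_unlock(dev);
	rtnl_unlock();
}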
@@ -14027,9 +14084,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_lock_sp(bp);
+ bnxt_rtnl_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_unlock_sp(bp);
+ bnxt_rtnl_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -14068,7 +14125,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_unlock_sp(bp);
+ bnxt_rtnl_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -14960,15 +15017,17 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!netdev_trylock(bp->dev)) {
+ while (!rtnl_trylock()) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
+ netdev_lock(bp->dev);
rc = bnxt_open(bp->dev);
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
netdev_unlock(bp->dev);
+ rtnl_unlock();
goto ulp_start;
}
@@ -14988,6 +15047,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_status_update(bp, true);
}
netdev_unlock(bp->dev);
+ rtnl_unlock();
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
netdev_lock(bp->dev);
@@ -15711,6 +15771,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
clone->rx_agg_prod = 0;
clone->rx_sw_agg_prod = 0;
clone->rx_next_cons = 0;
+ clone->need_head_pool = false;
rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
if (rc)
@@ -15753,7 +15814,7 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+ bnxt_alloc_one_rx_ring_netmem(bp, clone, idx);
if (bp->flags & BNXT_FLAG_TPA)
bnxt_alloc_one_tpa_info_data(bp, clone);
@@ -15769,7 +15830,7 @@ err_rxq_info_unreg:
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
page_pool_destroy(clone->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(clone))
page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
clone->head_pool = NULL;
@@ -15788,7 +15849,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
rxr->head_pool = NULL;
@@ -15879,6 +15940,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
rxr->page_pool = clone->page_pool;
rxr->head_pool = clone->head_pool;
rxr->xdp_rxq = clone->xdp_rxq;
+ rxr->need_head_pool = clone->need_head_pool;
bnxt_copy_rx_ring(bp, rxr, clone);
@@ -15936,7 +15998,7 @@ err_reset:
rc);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- bnxt_reset_task(bp, true);
+ netif_close(dev);
return rc;
}
@@ -15964,7 +16026,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
page_pool_disable_direct_recycling(rxr->page_pool);
- if (bnxt_separate_head_pool())
+ if (bnxt_separate_head_pool(rxr))
page_pool_disable_direct_recycling(rxr->head_pool);
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -16752,6 +16814,7 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ rtnl_lock();
netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
@@ -16796,6 +16859,7 @@ static int bnxt_resume(struct device *device)
resume_exit:
netdev_unlock(bp->dev);
+ rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -16961,6 +17025,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
+ rtnl_lock();
netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
@@ -16978,6 +17043,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
netdev_unlock(netdev);
+ rtnl_unlock();
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index bc8b3b7e915d..fda0d3cc6227 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -903,7 +903,7 @@ struct bnxt_sw_rx_bd {
};
struct bnxt_sw_rx_agg_bd {
- struct page *page;
+ netmem_ref netmem;
unsigned int offset;
dma_addr_t mapping;
};
@@ -1106,6 +1106,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index a000d3f630bd..ce97befd3cb3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -506,9 +506,16 @@ err:
start_utc, coredump.total_segs + 1,
rc);
kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
+ if (!rc) {
+ *dump_len += sizeof(struct bnxt_coredump_record);
+ /* The actual coredump length can be smaller than the length
+ * the FW reported earlier. Use the ethtool-provided length.
+ */
+ if (buf_len)
+ *dump_len = buf_len;
+ } else if (rc == -ENOBUFS) {
netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ }
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 15ca51b5d204..fb5f5b063c3d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -58,7 +58,7 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000U
+#define HWRM_CMD_MAX_TIMEOUT 60000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index a8e930d5dbb0..84c4812414fd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -20,6 +20,7 @@
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
+#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -148,7 +149,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- int i = 0;
ulp = edev->ulp_tbl;
netdev_lock(dev);
@@ -164,10 +164,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
synchronize_rcu();
ulp->max_async_event_id = 0;
ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
mutex_unlock(&edev->en_dev_lock);
netdev_unlock(dev);
return;
@@ -309,14 +305,12 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
- netdev_lock(bp->dev);
- ops = rcu_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_stop)
return;
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
ops->ulp_irq_stop(ulp->handle, reset);
- netdev_unlock(bp->dev);
}
}
@@ -335,8 +329,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
- netdev_lock(bp->dev);
- ops = rcu_dereference(ulp->ulp_ops);
+ ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -348,7 +341,6 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
bnxt_fill_msix_vecs(bp, ent);
}
ops->ulp_irq_restart(ulp->handle, ent);
- netdev_unlock(bp->dev);
kfree(ent);
}
}
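
The ulp hunks drop the extra netdev_lock()/netdev_unlock() pair: the callers now already hold the instance lock, so the RCU-protected ops pointer is fetched with netdev_lock_dereference(), i.e. rcu_dereference_protected() with lockdep checking against the netdev lock. A sketch with hypothetical my_* types:

#include <linux/netdevice.h>
#include <net/netdev_lock.h>

struct my_ulp_ops {				/* hypothetical */
	void (*irq_stop)(void *handle, bool reset);
};

struct my_ulp {
	struct my_ulp_ops __rcu *ops;
	void *handle;
};

/* Caller must hold netdev_lock(dev); lockdep verifies that. */
static void my_ulp_irq_stop(struct net_device *dev, struct my_ulp *ulp)
{
	struct my_ulp_ops *ops;

	ops = netdev_lock_dereference(ulp->ops, dev);
	if (ops && ops->irq_stop)
		ops->irq_stop(ulp->handle, false);
}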
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 7fa3b8d1ebd2..7b9dd8ebe4bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -10,9 +10,6 @@
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
-#define BNXT_ROCE_ULP 0
-#define BNXT_MAX_ULP 1
-
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
@@ -50,7 +47,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- atomic_t ref_count;
};
struct bnxt_en_dev {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index e675611777b5..4a6d8cb9f970 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -425,9 +425,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
- xdp_features_set_redirect_target(dev, true);
+ xdp_features_set_redirect_target_locked(dev, true);
} else {
- xdp_features_clear_redirect_target(dev);
+ xdp_features_clear_redirect_target_locked(dev);
bnxt_set_rx_skb_mode(bp, false);
}
bp->tx_nr_rings_xdp = tx_xdp;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 73d78dcb774d..fa0077bc67b7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -969,12 +969,13 @@ static int bcmgenet_set_pauseparam(struct net_device *dev,
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
- BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_RTNL = -1,
BCMGENET_STAT_MIB_RX,
BCMGENET_STAT_MIB_TX,
BCMGENET_STAT_RUNT,
BCMGENET_STAT_MISC,
BCMGENET_STAT_SOFT,
+ BCMGENET_STAT_SOFT64,
};
struct bcmgenet_stats {
@@ -984,13 +985,15 @@ struct bcmgenet_stats {
enum bcmgenet_stat_type type;
/* reg offset from UMAC base for misc counters */
u16 reg_offset;
+ /* sync for u64 stats counters */
+ int syncp_offset;
};
-#define STAT_NETDEV(m) { \
+#define STAT_RTNL(m) { \
.stat_string = __stringify(m), \
- .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
- .stat_offset = offsetof(struct net_device_stats, m), \
- .type = BCMGENET_STAT_NETDEV, \
+ .stat_sizeof = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m), \
+ .type = BCMGENET_STAT_RTNL, \
}
#define STAT_GENET_MIB(str, m, _type) { \
@@ -1000,6 +1003,14 @@ struct bcmgenet_stats {
.type = _type, \
}
+#define STAT_GENET_SOFT_MIB64(str, s, m) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->s.m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, s.m), \
+ .type = BCMGENET_STAT_SOFT64, \
+ .syncp_offset = offsetof(struct bcmgenet_priv, s.syncp), \
+}
+
#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
@@ -1014,18 +1025,38 @@ struct bcmgenet_stats {
}
#define STAT_GENET_Q(num) \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
- tx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
- tx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
- rx_rings[num].bytes), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
- rx_rings[num].packets), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
- rx_rings[num].errors), \
- STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
- rx_rings[num].dropped)
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_packets", \
+ tx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_bytes", \
+ tx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_errors", \
+ tx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("txq" __stringify(num) "_dropped", \
+ tx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].stats64, bytes), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_packets", \
+ rx_rings[num].stats64, packets), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_errors", \
+ rx_rings[num].stats64, errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].stats64, dropped), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_multicast", \
+ rx_rings[num].stats64, multicast), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_missed", \
+ rx_rings[num].stats64, missed), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_length_errors", \
+ rx_rings[num].stats64, length_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_over_errors", \
+ rx_rings[num].stats64, over_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_crc_errors", \
+ rx_rings[num].stats64, crc_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_frame_errors", \
+ rx_rings[num].stats64, frame_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_fragmented_errors", \
+ rx_rings[num].stats64, fragmented_errors), \
+ STAT_GENET_SOFT_MIB64("rxq" __stringify(num) "_broadcast", \
+ rx_rings[num].stats64, broadcast)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -1037,15 +1068,20 @@ struct bcmgenet_stats {
*/
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
/* general stats */
- STAT_NETDEV(rx_packets),
- STAT_NETDEV(tx_packets),
- STAT_NETDEV(rx_bytes),
- STAT_NETDEV(tx_bytes),
- STAT_NETDEV(rx_errors),
- STAT_NETDEV(tx_errors),
- STAT_NETDEV(rx_dropped),
- STAT_NETDEV(tx_dropped),
- STAT_NETDEV(multicast),
+ STAT_RTNL(rx_packets),
+ STAT_RTNL(tx_packets),
+ STAT_RTNL(rx_bytes),
+ STAT_RTNL(tx_bytes),
+ STAT_RTNL(rx_errors),
+ STAT_RTNL(tx_errors),
+ STAT_RTNL(rx_dropped),
+ STAT_RTNL(tx_dropped),
+ STAT_RTNL(multicast),
+ STAT_RTNL(rx_missed_errors),
+ STAT_RTNL(rx_length_errors),
+ STAT_RTNL(rx_over_errors),
+ STAT_RTNL(rx_crc_errors),
+ STAT_RTNL(rx_frame_errors),
/* UniMAC RSV counters */
STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
@@ -1133,6 +1169,20 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
+#define BCMGENET_STATS64_ADD(stats, m, v) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_add(&stats->m, v); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
+#define BCMGENET_STATS64_INC(stats, m) \
+ do { \
+ u64_stats_update_begin(&stats->syncp); \
+ u64_stats_inc(&stats->m); \
+ u64_stats_update_end(&stats->syncp); \
+ } while (0)
+
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1216,8 +1266,9 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
s = &bcmgenet_gstrings_stats[i];
switch (s->type) {
- case BCMGENET_STAT_NETDEV:
+ case BCMGENET_STAT_RTNL:
case BCMGENET_STAT_SOFT:
+ case BCMGENET_STAT_SOFT64:
continue;
case BCMGENET_STAT_RUNT:
offset += BCMGENET_STAT_OFFSET;
@@ -1255,28 +1306,40 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
u64 *data)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct rtnl_link_stats64 stats64;
+ struct u64_stats_sync *syncp;
+ unsigned int start;
int i;
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
- dev->netdev_ops->ndo_get_stats(dev);
+ dev_get_stats(dev, &stats64);
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
s = &bcmgenet_gstrings_stats[i];
- if (s->type == BCMGENET_STAT_NETDEV)
- p = (char *)&dev->stats;
- else
- p = (char *)priv;
- p += s->stat_offset;
- if (sizeof(unsigned long) != sizeof(u32) &&
- s->stat_sizeof == sizeof(unsigned long))
- data[i] = *(unsigned long *)p;
- else
- data[i] = *(u32 *)p;
+ p = (char *)priv;
+
+ if (s->type == BCMGENET_STAT_SOFT64) {
+ syncp = (struct u64_stats_sync *)(p + s->syncp_offset);
+ do {
+ start = u64_stats_fetch_begin(syncp);
+ data[i] = u64_stats_read((u64_stats_t *)(p + s->stat_offset));
+ } while (u64_stats_fetch_retry(syncp, start));
+ } else {
+ if (s->type == BCMGENET_STAT_RTNL)
+ p = (char *)&stats64;
+
+ p += s->stat_offset;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
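
The genet conversion above moves the per-ring counters from plain unsigned long fields to u64_stats_t guarded by a u64_stats_sync, so 64-bit values read consistently on 32-bit machines. The writer and reader halves pair up as in this minimal sketch (my_* names are illustrative):

#include <linux/u64_stats_sync.h>

struct my_rx_stats64 {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
};

static void my_stats_rx(struct my_rx_stats64 *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_add(&s->bytes, len);
	u64_stats_update_end(&s->syncp);
}

static u64 my_stats_read_bytes(struct my_rx_stats64 *s)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		bytes = u64_stats_read(&s->bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return bytes;
}

On 64-bit kernels the begin/retry pair compiles away; on 32-bit it is a seqcount that makes the reader retry if it raced with a writer.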
@@ -1856,6 +1919,7 @@ static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int txbds_processed = 0;
unsigned int bytes_compl = 0;
@@ -1896,8 +1960,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = c_index;
- ring->packets += pkts_compl;
- ring->bytes += bytes_compl;
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->packets, pkts_compl);
+ u64_stats_add(&stats->bytes, bytes_compl);
+ u64_stats_update_end(&stats->syncp);
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
@@ -1983,8 +2049,10 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
* the transmit checksum offsets in the descriptors
*/
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct bcmgenet_tx_ring *ring)
{
+ struct bcmgenet_tx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = netdev_priv(dev);
struct status_64 *status = NULL;
struct sk_buff *new_skb;
@@ -2001,7 +2069,7 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
if (!new_skb) {
dev_kfree_skb_any(skb);
priv->mib.tx_realloc_tsb_failed++;
- dev->stats.tx_dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
return NULL;
}
dev_consume_skb_any(skb);
@@ -2089,7 +2157,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
GENET_CB(skb)->bytes_sent = skb->len;
/* add the Transmit Status Block */
- skb = bcmgenet_add_tsb(dev, skb);
+ skb = bcmgenet_add_tsb(dev, skb, ring);
if (!skb) {
ret = NETDEV_TX_OK;
goto out;
@@ -2231,6 +2299,7 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int budget)
{
+ struct bcmgenet_rx_stats64 *stats = &ring->stats64;
struct bcmgenet_priv *priv = ring->priv;
struct net_device *dev = priv->dev;
struct enet_cb *cb;
@@ -2253,7 +2322,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- ring->errors += discards;
+ BCMGENET_STATS64_ADD(stats, missed, discards);
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -2279,7 +2348,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- ring->dropped++;
+ BCMGENET_STATS64_INC(stats, dropped);
goto next;
}
@@ -2306,8 +2375,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(len > RX_BUF_LENGTH)) {
netif_err(priv, rx_status, dev, "oversized packet\n");
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ BCMGENET_STATS64_INC(stats, length_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2315,7 +2383,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- ring->errors++;
+ BCMGENET_STATS64_INC(stats, fragmented_errors);
dev_kfree_skb_any(skb);
goto next;
}
@@ -2328,15 +2396,22 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
DMA_RX_RXER))) {
netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
(unsigned int)dma_flag);
+ u64_stats_update_begin(&stats->syncp);
if (dma_flag & DMA_RX_CRC_ERROR)
- dev->stats.rx_crc_errors++;
+ u64_stats_inc(&stats->crc_errors);
if (dma_flag & DMA_RX_OV)
- dev->stats.rx_over_errors++;
+ u64_stats_inc(&stats->over_errors);
if (dma_flag & DMA_RX_NO)
- dev->stats.rx_frame_errors++;
+ u64_stats_inc(&stats->frame_errors);
if (dma_flag & DMA_RX_LG)
- dev->stats.rx_length_errors++;
- dev->stats.rx_errors++;
+ u64_stats_inc(&stats->length_errors);
+ if ((dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER)) == DMA_RX_RXER)
+ u64_stats_inc(&stats->errors);
+ u64_stats_update_end(&stats->syncp);
dev_kfree_skb_any(skb);
goto next;
} /* error packet */
@@ -2356,10 +2431,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/* Finish setting up the received SKB and send it to the kernel */
skb->protocol = eth_type_trans(skb, priv->dev);
- ring->packets++;
- ring->bytes += len;
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->packets);
+ u64_stats_add(&stats->bytes, len);
if (dma_flag & DMA_RX_MULT)
- dev->stats.multicast++;
+ u64_stats_inc(&stats->multicast);
+ else if (dma_flag & DMA_RX_BRDCAST)
+ u64_stats_inc(&stats->broadcast);
+ u64_stats_update_end(&stats->syncp);
/* Notify kernel */
napi_gro_receive(&ring->napi, skb);
@@ -3420,7 +3500,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
netif_trans_update(dev);
- dev->stats.tx_errors++;
+ BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors);
netif_tx_wake_all_queues(dev);
}
@@ -3509,39 +3589,72 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+static void bcmgenet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long tx_bytes = 0, tx_packets = 0;
- unsigned long rx_bytes = 0, rx_packets = 0;
- unsigned long rx_errors = 0, rx_dropped = 0;
- struct bcmgenet_tx_ring *tx_ring;
- struct bcmgenet_rx_ring *rx_ring;
+ struct bcmgenet_tx_stats64 *tx_stats;
+ struct bcmgenet_rx_stats64 *rx_stats;
+ u64 rx_length_errors, rx_over_errors;
+ u64 rx_missed, rx_fragmented_errors;
+ u64 rx_crc_errors, rx_frame_errors;
+ u64 tx_errors, tx_dropped;
+ u64 rx_errors, rx_dropped;
+ u64 tx_bytes, tx_packets;
+ u64 rx_bytes, rx_packets;
+ unsigned int start;
unsigned int q;
+ u64 multicast;
for (q = 0; q <= priv->hw_params->tx_queues; q++) {
- tx_ring = &priv->tx_rings[q];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
+ tx_stats = &priv->tx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&tx_stats->syncp);
+ tx_bytes = u64_stats_read(&tx_stats->bytes);
+ tx_packets = u64_stats_read(&tx_stats->packets);
+ tx_errors = u64_stats_read(&tx_stats->errors);
+ tx_dropped = u64_stats_read(&tx_stats->dropped);
+ } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
+
+ stats->tx_bytes += tx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_errors += tx_errors;
+ stats->tx_dropped += tx_dropped;
}
for (q = 0; q <= priv->hw_params->rx_queues; q++) {
- rx_ring = &priv->rx_rings[q];
-
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
+ rx_stats = &priv->rx_rings[q].stats64;
+ do {
+ start = u64_stats_fetch_begin(&rx_stats->syncp);
+ rx_bytes = u64_stats_read(&rx_stats->bytes);
+ rx_packets = u64_stats_read(&rx_stats->packets);
+ rx_errors = u64_stats_read(&rx_stats->errors);
+ rx_dropped = u64_stats_read(&rx_stats->dropped);
+ rx_missed = u64_stats_read(&rx_stats->missed);
+ rx_length_errors = u64_stats_read(&rx_stats->length_errors);
+ rx_over_errors = u64_stats_read(&rx_stats->over_errors);
+ rx_crc_errors = u64_stats_read(&rx_stats->crc_errors);
+ rx_frame_errors = u64_stats_read(&rx_stats->frame_errors);
+ rx_fragmented_errors = u64_stats_read(&rx_stats->fragmented_errors);
+ multicast = u64_stats_read(&rx_stats->multicast);
+ } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
+
+ rx_errors += rx_length_errors;
+ rx_errors += rx_crc_errors;
+ rx_errors += rx_frame_errors;
+ rx_errors += rx_fragmented_errors;
+
+ stats->rx_bytes += rx_bytes;
+ stats->rx_packets += rx_packets;
+ stats->rx_errors += rx_errors;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_missed_errors += rx_missed;
+ stats->rx_length_errors += rx_length_errors;
+ stats->rx_over_errors += rx_over_errors;
+ stats->rx_crc_errors += rx_crc_errors;
+ stats->rx_frame_errors += rx_frame_errors;
+ stats->multicast += multicast;
}
-
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
- dev->stats.rx_bytes = rx_bytes;
- dev->stats.rx_packets = rx_packets;
- dev->stats.rx_errors = rx_errors;
- dev->stats.rx_missed_errors = rx_errors;
- dev->stats.rx_dropped = rx_dropped;
- return &dev->stats;
}
static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
@@ -3569,7 +3682,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
- .ndo_get_stats = bcmgenet_get_stats,
+ .ndo_get_stats64 = bcmgenet_get_stats64,
.ndo_change_carrier = bcmgenet_change_carrier,
};
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 10c631bbe964..5ec3979779ec 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -155,6 +155,30 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
+struct bcmgenet_tx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+};
+
+struct bcmgenet_rx_stats64 {
+ struct u64_stats_sync syncp;
+ u64_stats_t bytes;
+ u64_stats_t packets;
+ u64_stats_t errors;
+ u64_stats_t dropped;
+ u64_stats_t multicast;
+ u64_stats_t broadcast;
+ u64_stats_t missed;
+ u64_stats_t length_errors;
+ u64_stats_t over_errors;
+ u64_stats_t crc_errors;
+ u64_stats_t frame_errors;
+ u64_stats_t fragmented_errors;
+};
+
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
@@ -515,8 +539,7 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
- unsigned long packets;
- unsigned long bytes;
+ struct bcmgenet_tx_stats64 stats64;
unsigned int index; /* ring index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
@@ -540,10 +563,7 @@ struct bcmgenet_net_dim {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
- unsigned long bytes;
- unsigned long packets;
- unsigned long errors;
- unsigned long dropped;
+ struct bcmgenet_rx_stats64 stats64;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 71c619d2bea5..b6437ba7a2eb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -625,7 +625,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
.asym_pause = 0,
};
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+ phydev = fixed_phy_register(&fphy_status, NULL);
if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return PTR_ERR(phydev);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d1f541af4e3b..ff47e96b9124 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,7 +54,7 @@
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <linux/crc32poly.h>
+#include <linux/crc32.h>
#include <linux/dmi.h>
#include <net/checksum.h>
@@ -9809,26 +9809,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
static inline u32 calc_crc(unsigned char *buf, int len)
{
- u32 reg;
- u32 tmp;
- int j, k;
-
- reg = 0xffffffff;
-
- for (j = 0; j < len; j++) {
- reg ^= buf[j];
-
- for (k = 0; k < 8; k++) {
- tmp = reg & 0x01;
-
- reg >>= 1;
-
- if (tmp)
- reg ^= CRC32_POLY_LE;
- }
- }
-
- return ~reg;
+ return ~crc32(~0, buf, len);
}
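[editor note] The deleted loop is a byte-at-a-time, LSB-first CRC-32 over the reflected Ethernet polynomial with a 0xffffffff seed and final inversion, i.e. the standard CRC-32 that the kernel's ~crc32(~0, buf, len) also yields. A throwaway userspace check of that equivalence, using zlib's crc32() as a stand-in for the kernel helper (build line and test vector are illustrative):

/* gcc crc_check.c -lz && ./a.out */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <zlib.h>

#define CRC32_POLY_LE 0xedb88320

/* The removed open-coded loop from tg3. */
static uint32_t calc_crc(const unsigned char *buf, int len)
{
	uint32_t reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			uint32_t tmp = reg & 0x01;

			reg >>= 1;
			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}
	return ~reg;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* zlib's crc32(0, ...) equals the kernel's ~crc32(~0, ...). */
	assert(calc_crc(mac, sizeof(mac)) == crc32(0L, mac, sizeof(mac)));
	printf("crc = 0x%08x\n", calc_crc(mac, sizeof(mac)));
	return 0;
}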
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index a03eff3d4425..50eb54ecf1ba 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1735,7 +1735,7 @@ bnad_iocpf_sem_timeout(struct timer_list *t)
* Time CPU m CPU n
* 0 1 = test_bit
* 1 clear_bit
- * 2 del_timer_sync
+ * 2 timer_delete_sync
* 3 mod_timer
*/
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1fe8ec37491b..d1f1ae5ea161 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -997,22 +997,15 @@ static void macb_update_stats(struct macb *bp)
static int macb_halt_tx(struct macb *bp)
{
- unsigned long halt_time, timeout;
- u32 status;
+ u32 status;
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
- do {
- halt_time = jiffies;
- status = macb_readl(bp, TSR);
- if (!(status & MACB_BIT(TGO)))
- return 0;
-
- udelay(250);
- } while (time_before(halt_time, timeout));
-
- return -ETIMEDOUT;
+ /* Poll TSR until TGO is cleared or timeout. */
+ return read_poll_timeout_atomic(macb_readl, status,
+ !(status & MACB_BIT(TGO)),
+ 250, MACB_HALT_TIMEOUT, false,
+ bp, TSR);
}
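[editor note] read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, delay_before_read, args...) busy-polls op(args...) into val until cond is true, returning 0 on success or -ETIMEDOUT, so the conversion preserves the 250 us poll interval and the MACB_HALT_TIMEOUT budget of the removed loop. A generic sketch of the idiom against a hypothetical BUSY bit (all names below are illustrative):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEV_STATUS_BUSY	BIT(0)

/* Poll every 10 us, give up after 1 ms; safe in atomic context. */
static int dev_wait_idle(void __iomem *status_reg)
{
	u32 status;

	return read_poll_timeout_atomic(readl, status,
					!(status & DEV_STATUS_BUSY),
					10, 1000, false, status_reg);
}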
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
@@ -5290,7 +5283,11 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_out_free_netdev;
+ }
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06397cc8bb36..5211759bfe47 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1389,11 +1389,9 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
- /* Check if timestamp is requested */
- if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- skb_tx_timestamp(skb);
+ /* Check if hw timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
return;
- }
/* Tx timestamping not supported along with TSO, so ignore request */
if (skb_shinfo(skb)->gso_size)
@@ -1472,6 +1470,8 @@ static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 608cc6af5af1..3b7ad744b2dd 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1605,10 +1605,10 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
- goto err_disable_device;
+ goto err_zero_drv_data;
}
/* MAP configuration registers */
@@ -1616,7 +1616,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!bgx->reg_base) {
dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_zero_drv_data;
}
set_max_bgx_per_node(pdev);
@@ -1688,10 +1688,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_enable:
bgx_vnic[bgx->bgx_id] = NULL;
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
-err_release_regions:
- pci_release_regions(pdev);
-err_disable_device:
- pci_disable_device(pdev);
+err_zero_drv_data:
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -1710,8 +1707,6 @@ static void bgx_remove(struct pci_dev *pdev)
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
bgx_vnic[bgx->bgx_id] = NULL;
- pci_release_regions(pdev);
- pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
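[editor note] pcim_enable_device() and pcim_request_all_regions() register devres cleanup actions, which is why the error labels and bgx_remove() above shed their explicit pci_release_regions()/pci_disable_device() calls. A minimal sketch of the resulting probe shape for a hypothetical driver (names are illustrative):

#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	int err;

	err = pcim_enable_device(pdev);		/* auto-disabled on unbind */
	if (err)
		return err;

	err = pcim_request_all_regions(pdev, "foo"); /* auto-released */
	if (err)
		return err;

	regs = pcim_iomap(pdev, 0, 0);		/* auto-unmapped */
	if (!regs)
		return -ENOMEM;

	/* ... no matching release/disable needed in remove() ... */
	return 0;
}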
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 3b7068832f95..4a0e2d2eb60a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -351,7 +351,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
adapter->msg_enable = val;
}
-static const char stats_strings[][ETH_GSTRING_LEN] = {
+static const char stats_strings[][ETH_GSTRING_LEN] __nonstring_array = {
"TxOctetsOK",
"TxOctetsBad",
"TxUnicastFramesOK",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 551c279dc14b..51395c96b2e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6480,10 +6480,11 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x,
+static int cxgb4_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
@@ -6494,7 +6495,8 @@ static int cxgb4_xfrm_add_state(struct xfrm_state *x,
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(dev, x,
+ extack);
out_unlock:
mutex_unlock(&uld_mutex);
@@ -6502,9 +6504,9 @@ out_unlock:
return ret;
}
-static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+static void cxgb4_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6514,15 +6516,15 @@ static void cxgb4_xfrm_del_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
}
-static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+static void cxgb4_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
- struct adapter *adap = netdev2adap(x->xso.dev);
+ struct adapter *adap = netdev2adap(dev);
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
@@ -6532,7 +6534,7 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x)
if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
goto out_unlock;
- adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(dev, x);
out_unlock:
mutex_unlock(&uld_mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index b08356060fb4..7bab8da8f6e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -29,7 +29,7 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops cxgb4_thermal_ops = {
+static const struct thermal_zone_device_ops cxgb4_thermal_ops = {
.get_temp = cxgb4_thermal_get_temp,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index f991a28a71c3..f2d533acb056 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1533,7 +1533,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
q = &adap->sge.ethtxq[qidx + pi->first_qset];
}
- skb_tx_timestamp(skb);
reclaim_completed_tx(adap, &q->q, -1, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
@@ -1706,6 +1705,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
+ skb_tx_timestamp(skb);
+
if (immediate) {
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
@@ -2268,7 +2269,6 @@ static int ethofld_hard_xmit(struct net_device *dev,
d = &eosw_txq->desc[eosw_txq->last_pidx];
skb = d->skb;
- skb_tx_timestamp(skb);
wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
@@ -2373,6 +2373,7 @@ write_wr_headers:
eohw_txq->vlan_ins++;
txq_advance(&eohw_txq->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index baba96883f48..ecd9a0bd5e18 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -75,9 +75,12 @@ static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x);
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
@@ -223,7 +226,8 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+static int ch_ipsec_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
@@ -302,14 +306,16 @@ out:
return res;
}
-static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_del_state(struct net_device *dev,
+ struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_free_state(struct net_device *dev,
+ struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index e8e460a92e0e..4e2096e49684 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1640,6 +1640,7 @@ static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
+ skb_tx_timestamp(skb);
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
return 0;
}
@@ -1903,7 +1904,6 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
th = tcp_hdr(nskb);
skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
- skb_tx_timestamp(nskb);
if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
goto out;
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 9c12e967e9f1..301b3f3114af 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -26,6 +26,7 @@
#define ENIC_WQ_MAX 256
#define ENIC_RQ_MAX 256
+#define ENIC_RQ_MIN_DEFAULT 8
#define ENIC_WQ_NAPI_BUDGET 256
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c753c35b26eb..6ef8a0d90bce 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2296,7 +2296,8 @@ static int enic_adjust_resources(struct enic *enic)
* used based on which resource is the most constrained
*/
wq_avail = min(enic->wq_avail, ENIC_WQ_MAX);
- rq_default = netif_get_num_default_rss_queues();
+ rq_default = max(netif_get_num_default_rss_queues(),
+ ENIC_RQ_MIN_DEFAULT);
rq_avail = min3(enic->rq_avail, ENIC_RQ_MAX, rq_default);
max_queues = min(enic->cq_avail,
enic->intr_avail - ENIC_MSIX_RESERVED_INTR);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 517a15904fb0..6a2004bbe87f 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1144,6 +1144,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
+ bool tcp = false;
void *buffer;
u16 mss;
int ret;
@@ -1151,6 +1152,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word1 = skb->len;
word3 = SOF_BIT;
+ /* Determine if we are doing TCP */
+ if (skb->protocol == htons(ETH_P_IP))
+ tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
+ else
+ /* IPv6 */
+ tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);
+
mss = skb_shinfo(skb)->gso_size;
if (mss) {
/* This means we are dealing with TCP and skb->len is the
@@ -1163,8 +1171,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
mss, skb->len);
word1 |= TSS_MTU_ENABLE_BIT;
word3 |= mss;
+ } else if (tcp) {
+ /* Even if we are not using TSO, use the hardware offloader
+ * for transferring the TCP frame: this hardware has partial
+ * TCP awareness (called TOE - TCP Offload Engine) and will
+ * according to the datasheet put packets belonging to the
+ * same TCP connection in the same queue for the TOE/TSO
+ * engine to process. The engine will deal with chopping
+ * up frames that exceed ETH_DATA_LEN which the
+ * checksumming engine cannot handle (see below) into
+ * manageable chunks. It flawlessly deals with quite big
+ * frames and frames containing custom DSA EtherTypes.
+ */
+ mss = netdev->mtu + skb_tcp_all_headers(skb);
+ mss = min(mss, skb->len);
+ netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
+ skb->len, netdev->mtu, mss);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
} else if (skb->len >= ETH_FRAME_LEN) {
- /* Hardware offloaded checksumming isn't working on frames
+ /* Hardware offloaded checksumming isn't working on non-TCP frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
@@ -1181,21 +1207,16 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int tcp = 0;
-
/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
word1 |= TSS_IP_CHKSUM_BIT;
- tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
- } else { /* IPv6 */
+ else
word1 |= TSS_IPV6_ENABLE_BIT;
- tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
- }
word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
}
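[editor note] The TOE path hands the engine an MSS of one MTU's worth of payload plus all headers, clamped to the frame length so short frames are not inflated. A hedged restatement of just that computation (gemini_toe_mss is a hypothetical helper, not in the patch):

#include <linux/netdevice.h>
#include <linux/tcp.h>

static unsigned int gemini_toe_mss(const struct sk_buff *skb,
				   const struct net_device *netdev)
{
	/* One full segment including Ethernet/IP/TCP headers... */
	unsigned int mss = netdev->mtu + skb_tcp_all_headers(skb);

	/* ...but never more than the frame actually carries. */
	return min(mss, skb->len);
}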
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c8c53121557f..bec76e7bf5dd 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1411,7 +1411,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
return -ENODEV;
ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 5930cdec6f2f..e593273b2867 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -375,7 +375,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_request_regions(pdev, DRV_NAME))
+ if (pcim_request_all_regions(pdev, DRV_NAME))
goto err_out_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 232e839a9d07..038a0400c1f9 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -146,6 +146,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
+
+ spin_lock_init(&np->stats_lock);
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);
@@ -865,7 +867,6 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
- dev->stats.tx_errors++;
/* Transmit Underrun */
if (tx_status & 0x10) {
dev->stats.tx_fifo_errors++;
@@ -902,9 +903,15 @@ tx_error (struct net_device *dev, int tx_status)
rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}
+
+ spin_lock(&np->stats_lock);
/* Maximum Collisions */
if (tx_status & 0x08)
dev->stats.collisions++;
+
+ dev->stats.tx_errors++;
+ spin_unlock(&np->stats_lock);
+
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -1073,7 +1080,9 @@ get_stats (struct net_device *dev)
int i;
#endif
unsigned int stat_reg;
+ unsigned long flags;
+ spin_lock_irqsave(&np->stats_lock, flags);
/* All statistics registers need to be acknowledged,
else statistics overflow could cause problems */
@@ -1123,6 +1132,9 @@ get_stats (struct net_device *dev)
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
+
+ spin_unlock_irqrestore(&np->stats_lock, flags);
+
return &dev->stats;
}
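[editor note] tx_error() runs from the interrupt path while get_stats() can be called in process context, so the reader must mask local IRQs (spin_lock_irqsave) while the in-IRQ writer can take the plain spin_lock(). A sketch of that split, with hypothetical helper names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Process context: mask IRQs so the interrupt-path writer cannot
 * preempt us while we hold the lock and deadlock on this CPU.
 */
static void foo_read_stats(struct net_device *dev, spinlock_t *stats_lock)
{
	unsigned long flags;

	spin_lock_irqsave(stats_lock, flags);
	/* ... read and fold hardware counters into dev->stats ... */
	spin_unlock_irqrestore(stats_lock, flags);
}

/* Interrupt context: IRQs are already off on this CPU, so the
 * cheaper spin_lock() suffices, as in tx_error() above.
 */
static void foo_count_tx_error(struct net_device *dev, spinlock_t *stats_lock)
{
	spin_lock(stats_lock);
	dev->stats.tx_errors++;
	spin_unlock(stats_lock);
}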
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 0e33e2eaae96..ba679025e866 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -329,18 +329,18 @@ enum _pcs_anlpar {
};
typedef struct t_SROM {
- u16 config_param; /* 0x00 */
- u16 asic_ctrl; /* 0x02 */
- u16 sub_vendor_id; /* 0x04 */
- u16 sub_system_id; /* 0x06 */
- u16 pci_base_1; /* 0x08 (IP1000A only) */
- u16 pci_base_2; /* 0x0a (IP1000A only) */
+ __le16 config_param; /* 0x00 */
+ __le16 asic_ctrl; /* 0x02 */
+ __le16 sub_vendor_id; /* 0x04 */
+ __le16 sub_system_id; /* 0x06 */
+ __le16 pci_base_1; /* 0x08 (IP1000A only) */
+ __le16 pci_base_2; /* 0x0a (IP1000A only) */
__le16 led_mode; /* 0x0c (IP1000A only) */
- u16 reserved1[9]; /* 0x0e-0x1f */
+ __le16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
u8 sib[204]; /* 0x30-0xfb */
- u32 crc; /* 0xfc-0xff */
+ __le32 crc; /* 0xfc-0xff */
} SROM_t, *PSROM_t;
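[editor note] The __le16/__le32 annotations let sparse flag any direct use of the on-EEPROM fields; consumers must go through le16_to_cpu()/le32_to_cpu(), which compile to no-ops on little-endian CPUs. A one-function sketch (srom_sub_vendor_id is hypothetical):

#include <asm/byteorder.h>
#include "dl2k.h"	/* SROM_t with the __le16 fields above */

static u16 srom_sub_vendor_id(const SROM_t *srom)
{
	/* Explicit conversion keeps big-endian hosts correct. */
	return le16_to_cpu(srom->sub_vendor_id);
}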
/* Ioctl custom data */
@@ -372,6 +372,8 @@ struct netdev_private {
struct pci_dev *pdev;
void __iomem *ioaddr;
void __iomem *eeprom_addr;
+	/* To ensure synchronization when stats are updated. */
+ spinlock_t stats_lock;
spinlock_t tx_lock;
spinlock_t rx_lock;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 51b8377edd1d..d730af4a50c7 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
/* version 1 of the cmd is not supported only by BE2 */
if (BE2_chip(adapter))
hdr->version = 0;
- if (BE3_chip(adapter) || lancer_chip(adapter))
+ else if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
else
hdr->version = 2;
@@ -2615,7 +2615,11 @@ err:
return status;
}
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+/*
+ * Since the cookie is text, keep it split into two adjacent string
+ * literals so the full marker never appears contiguously in this
+ * source file and cannot be matched on storage holding it.
+ */
+static const char flash_cookie[32] __nonstring = "*** SE FLAS" "H DIRECTORY *** ";
static bool phy_flashing_required(struct be_adapter *adapter)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d70818f06be7..5e2d3ddb5d43 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1415,7 +1415,7 @@ struct flash_section_entry {
} __packed;
struct flash_section_info {
- u8 cookie[32];
+ u8 cookie[32] __nonstring;
struct flash_section_hdr fsec_hdr;
struct flash_section_entry fsec_entry[32];
} __packed;
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 625245b0845c..eba73246f986 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -67,6 +67,8 @@
#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK BIT(12)
+#define TSNEP_TX_TYPE_TSTAMP BIT(13)
+#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
@@ -386,8 +388,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
@@ -479,7 +480,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
return mapped;
}
-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
+ bool do_tstamp)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
@@ -505,6 +507,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
entry->type = TSNEP_TX_TYPE_SKB_INLINE;
mapped = 0;
}
+
+ if (do_tstamp)
+ entry->type |= TSNEP_TX_TYPE_TSTAMP;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -558,11 +563,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- int count = 1;
struct tsnep_tx_entry *entry;
+ bool do_tstamp = false;
+ int count = 1;
int length;
- int i;
int retval;
+ int i;
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
@@ -579,7 +585,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry = &tx->entry[tx->write];
entry->skb = skb;
- retval = tsnep_tx_map(skb, tx, count);
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_tstamp = true;
+ }
+
+ retval = tsnep_tx_map(skb, tx, count, do_tstamp);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
@@ -591,9 +603,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
}
length = retval;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
@@ -844,8 +853,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
length = tsnep_tx_unmap(tx, tx->read, count);
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
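[editor note] Folding the timestamp request into entry->type lets tsnep_tx_activate() and tsnep_tx_poll() avoid touching skb_shinfo() at all; note that comparing the masked value against the two-bit mask itself tests that both bits are set, not merely one. A tiny sketch (the BIT(0) value for TSNEP_TX_TYPE_SKB is an assumption for illustration):

#include <linux/bits.h>
#include <linux/types.h>

#define TSNEP_TX_TYPE_SKB	 BIT(0)		/* assumed value */
#define TSNEP_TX_TYPE_TSTAMP	 BIT(13)
#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)

/* True only for skb entries that also requested a hw timestamp. */
static bool tsnep_entry_wants_tstamp(u32 type)
{
	return (type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP;
}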
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 17ec35e75a65..a98d5af3f9e3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1906,7 +1906,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_phy_connect;
}
- phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np);
+ phydev = fixed_phy_register(&ncsi_phy_status, np);
if (IS_ERR(phydev)) {
dev_err(&pdev->dev, "failed to register fixed PHY device\n");
err = PTR_ERR(phydev);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index a2d7300925a8..bbef47c3480c 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -71,7 +71,6 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
select PHYLIB
depends on OF
- select MDIO_DEVRES
select OF_MDIO
help
This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 4948b4906584..23c23cca2620 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -3089,15 +3089,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
return nxmit;
}
-static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct dpaa_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
- switch (config.tx_type) {
+ return 0;
+}
+
+static int dpaa_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa_priv *priv = netdev_priv(dev);
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
@@ -3112,7 +3122,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
/* Couldn't disable rx/tx timestamping separately.
* Do nothing here.
*/
@@ -3121,28 +3131,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
- int ret = -EINVAL;
struct dpaa_priv *priv = netdev_priv(net_dev);
- if (cmd == SIOCGMIIREG) {
- if (net_dev->phydev)
- return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
- cmd);
- }
-
- if (cmd == SIOCSHWTSTAMP)
- return dpaa_ts_ioctl(net_dev, rq, cmd);
-
- return ret;
+ return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd);
}
static const struct net_device_ops dpaa_ops = {
@@ -3160,6 +3159,8 @@ static const struct net_device_ops dpaa_ops = {
.ndo_change_mtu = dpaa_change_mtu,
.ndo_bpf = dpaa_xdp,
.ndo_xdp_xmit = dpaa_xdp_xmit,
+ .ndo_hwtstamp_get = dpaa_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa_hwtstamp_set,
};
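[editor note] With ndo_hwtstamp_get/ndo_hwtstamp_set the core dispatches SIOC[GS]HWTSTAMP and performs all copy_from_user()/copy_to_user() work itself, handing drivers a kernel_hwtstamp_config directly. A minimal sketch of the new handler shape, assuming a hypothetical foo_priv with simple on/off state:

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

struct foo_priv {
	bool tx_tstamp;
	bool rx_tstamp;
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *config)
{
	struct foo_priv *priv = netdev_priv(dev);

	config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
					      HWTSTAMP_FILTER_NONE;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->tx_tstamp = false;
		break;
	case HWTSTAMP_TX_ON:
		priv->tx_tstamp = true;
		break;
	default:
		return -ERANGE;
	}

	if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
		priv->rx_tstamp = false;
	} else {
		priv->rx_tstamp = true;
		/* Report back what was actually enabled. */
		config->rx_filter = HWTSTAMP_FILTER_ALL;
	}
	return 0;
}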
static int dpaa_napi_add(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 29886a8ba73f..2ec2c3dab250 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2585,40 +2585,52 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
return 0;
}
-static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa2_eth_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
if (!dpaa2_ptp)
return -EINVAL;
- if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->tx_tstamp_type = config.tx_type;
+ priv->tx_tstamp_type = config->tx_type;
break;
default:
return -ERANGE;
}
- if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
priv->rx_tstamp = false;
} else {
priv->rx_tstamp = true;
/* TS is set for all frame types, not only those requested */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
dpaa2_ptp_onestep_reg_update_method(priv);
- return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
+}
+
+static int dpaa2_eth_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
+ config->tx_type = priv->tx_tstamp_type;
+ config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
+
+ return 0;
}
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2626,9 +2638,6 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct dpaa2_eth_priv *priv = netdev_priv(dev);
int err;
- if (cmd == SIOCSHWTSTAMP)
- return dpaa2_eth_ts_ioctl(dev, rq, cmd);
-
mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv)) {
@@ -3034,7 +3043,9 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_xsk_wakeup = dpaa2_xsk_wakeup,
.ndo_setup_tc = dpaa2_eth_setup_tc,
.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
- .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid,
+ .ndo_hwtstamp_get = dpaa2_eth_hwtstamp_get,
+ .ndo_hwtstamp_set = dpaa2_eth_hwtstamp_set,
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 6c2779047dcd..e917132d3714 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -15,10 +15,16 @@ config NXP_ENETC_PF_COMMON
If compiled as module (M), the module name is nxp-enetc-pf-common.
+config NXP_NETC_LIB
+ tristate
+ help
+	  This module provides common functionality for both ENETC and the
+	  NETC Switch, such as the NETC Table Management Protocol (NTMP) 2.0
+	  and the common tc flower and debugfs interfaces.
+
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI_MSI
- select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
@@ -36,10 +42,10 @@ config FSL_ENETC
config NXP_ENETC4
tristate "ENETC4 PF driver"
depends on PCI_MSI
- select MDIO_DEVRES
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select NXP_ENETC_PF_COMMON
+ select NXP_NETC_LIB
select PHYLINK
select DIMLIB
help
@@ -73,7 +79,7 @@ config FSL_ENETC_IERB
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && MDIO_DEVRES && MDIO_BUS
+ depends on PCI && PHYLIB
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 6fd27ee4fcd1..f1c5ad45fd76 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -6,6 +6,9 @@ fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_NXP_ENETC_PF_COMMON) += nxp-enetc-pf-common.o
nxp-enetc-pf-common-y := enetc_pf_common.o
+obj-$(CONFIG_NXP_NETC_LIB) += nxp-netc-lib.o
+nxp-netc-lib-y := ntmp.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
@@ -13,6 +16,7 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_NXP_ENETC4) += nxp-enetc4.o
nxp-enetc4-y := enetc4_pf.o
+nxp-enetc4-$(CONFIG_DEBUG_FS) += enetc4_debugfs.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 3ee52f4b1166..dcc3fbac3481 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -36,6 +36,42 @@ static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
enetc_mm_commit_preemptible_tcs(priv);
}
+static int enetc_mac_addr_hash_idx(const u8 *addr)
+{
+ u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
+ u64 mask = 0;
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ mask |= BIT_ULL(i * 6);
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight64(fold & (mask << i)) & 0x1) << i;
+
+ return res;
+}
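[editor note] enetc_mac_addr_hash_idx() byte-reverses the 48-bit MAC, then XOR-folds it across eight 6-bit-spaced bit columns into a 0-63 bucket of the 64-bit hash filter. A standalone userspace restatement for experimenting with bucket collisions (the example MAC is arbitrary; __builtin_parityll stands in for hweight64() & 1):

/* gcc hash_demo.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

static int mac_hash_idx(const uint8_t *addr)
{
	uint64_t fold = 0;
	uint64_t mask = 0;
	int res = 0;
	int i;

	for (i = 0; i < 6; i++)		/* reversed byte order */
		fold |= (uint64_t)addr[i] << (i * 8);

	for (i = 0; i < 8; i++)		/* bits 0, 6, 12, ..., 42 */
		mask |= 1ULL << (i * 6);

	for (i = 0; i < 6; i++)		/* parity of each bit column */
		res |= __builtin_parityll(fold & (mask << i)) << i;

	return res;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x05, 0xde, 0xad };

	printf("hash idx = %d\n", mac_hash_idx(mac)); /* 0..63 */
	return 0;
}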
+
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr)
+{
+ int idx = enetc_mac_addr_hash_idx(addr);
+
+ /* add hash table entry */
+ __set_bit(idx, filter->mac_hash_table);
+ filter->mac_addr_cnt++;
+}
+EXPORT_SYMBOL_GPL(enetc_add_mac_addr_ht_filter);
+
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
+{
+ filter->mac_addr_cnt = 0;
+
+ bitmap_zero(filter->mac_hash_table,
+ ENETC_MADDR_HASH_TBL_SZ);
+}
+EXPORT_SYMBOL_GPL(enetc_reset_mac_addr_filter);
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
@@ -2379,7 +2415,7 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
for (i = 0; i < si->num_rss; i++)
rss_table[i] = i % num_groups;
- enetc_set_rss_table(si, rss_table, si->num_rss);
+ si->ops->set_rss_table(si, rss_table, si->num_rss);
kfree(rss_table);
@@ -2394,6 +2430,20 @@ static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
}
+static void enetc_set_rss(struct net_device *ndev, int en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 reg;
+
+ enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
+
+ reg = enetc_rd(hw, ENETC_SIMR);
+ reg &= ~ENETC_SIMR_RSSE;
+ reg |= (en) ? ENETC_SIMR_RSSE : 0;
+ enetc_wr(hw, ENETC_SIMR, reg);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -2410,13 +2460,13 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
if (si->hw_features & ENETC_SI_F_LSO)
enetc_set_lso_flags_mask(hw);
- /* TODO: RSS support for i.MX95 will be supported later, and the
- * is_enetc_rev1() condition will be removed
- */
- if (si->num_rss && is_enetc_rev1(si)) {
+ if (si->num_rss) {
err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
if (err)
return err;
+
+ if (priv->ndev->features & NETIF_F_RXHASH)
+ enetc_set_rss(priv->ndev, true);
}
return 0;
@@ -3209,22 +3259,6 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(enetc_get_stats);
-static int enetc_set_rss(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- u32 reg;
-
- enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
-
- reg = enetc_rd(hw, ENETC_SIMR);
- reg &= ~ENETC_SIMR_RSSE;
- reg |= (en) ? ENETC_SIMR_RSSE : 0;
- enetc_wr(hw, ENETC_SIMR, reg);
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -3262,16 +3296,17 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
}
EXPORT_SYMBOL_GPL(enetc_set_features);
-static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err, new_offloads = priv->active_offloads;
- struct hwtstamp_config config;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ return -EOPNOTSUPP;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
@@ -3290,13 +3325,13 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
new_offloads |= ENETC_F_RX_TSTAMP;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
}
if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
@@ -3309,42 +3344,36 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
priv->active_offloads = new_offloads;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_set);
-static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct hwtstamp_config config;
- config.flags = 0;
+ if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ return -EOPNOTSUPP;
if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
else if (priv->active_offloads & ENETC_F_TX_TSTAMP)
- config.tx_type = HWTSTAMP_TX_ON;
+ config->tx_type = HWTSTAMP_TX_ON;
else
- config.tx_type = HWTSTAMP_TX_OFF;
+ config->tx_type = HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ config->rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
+EXPORT_SYMBOL_GPL(enetc_hwtstamp_get);
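[editor note] Replacing the config guard in enetc_ioctl() with IS_ENABLED() inside the handlers keeps both branches compiled and type-checked, with the dead one eliminated by the optimizer. The general shape, with hypothetical names:

#include <linux/errno.h>
#include <linux/kconfig.h>

/* Both arms are parsed and type-checked even when CONFIG_FOO_FEATURE
 * is off; the unreachable one is discarded at compile time.
 */
static int foo_feature_op(void)
{
	if (!IS_ENABLED(CONFIG_FOO_FEATURE))
		return -EOPNOTSUPP;

	return 0; /* ... real work here ... */
}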
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
- if (cmd == SIOCSHWTSTAMP)
- return enetc_hwtstamp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return enetc_hwtstamp_get(ndev, rq);
- }
-
if (!priv->phylink)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 4ad4eb5c5a74..872d2cbd088b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <linux/fsl/ntmp.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>
@@ -22,6 +23,18 @@
#define ENETC_CBD_DATA_MEM_ALIGN 64
+#define ENETC_MADDR_HASH_TBL_SZ 64
+
+enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
+
+struct enetc_mac_filter {
+ union {
+ char mac_addr[ETH_ALEN];
+ DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
+ };
+ int mac_addr_cnt;
+};
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -266,6 +279,19 @@ struct enetc_platform_info {
const struct enetc_drvdata *data;
};
+struct enetc_si;
+
+/*
+ * This structure defines some common hooks for ENETC PSI and VSI.
+ * In addition, since VSI only uses struct enetc_si as its private
+ * driver data, this structure also defines some hooks specifically
+ * for VSI. VSI-specific hooks follow the 'vf_*()' naming format.
+ */
+struct enetc_si_ops {
+ int (*get_rss_table)(struct enetc_si *si, u32 *table, int count);
+ int (*set_rss_table)(struct enetc_si *si, const u32 *table, int count);
+};
+
/* PCI IEP device data */
struct enetc_si {
struct pci_dev *pdev;
@@ -274,7 +300,10 @@ struct enetc_si {
struct net_device *ndev; /* back ref. */
- struct enetc_cbdr cbd_ring;
+ union {
+ struct enetc_cbdr cbd_ring; /* Only ENETC 1.0 */
+ struct ntmp_user ntmp_user; /* ENETC 4.1 and later */
+ };
int num_rx_rings; /* how many rings are available in the SI */
int num_tx_rings;
@@ -284,6 +313,11 @@ struct enetc_si {
u16 revision;
int hw_features;
const struct enetc_drvdata *drvdata;
+ const struct enetc_si_ops *ops;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct rx_mode_task;
+ struct dentry *debugfs_root;
};
#define ENETC_SI_ALIGN 32
@@ -466,6 +500,9 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);
int enetc_get_driver_data(struct enetc_si *si);
+void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
+ const unsigned char *addr);
+void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter);
int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
@@ -481,6 +518,12 @@ int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
+int enetc_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config);
+int enetc_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
/* ethtool */
extern const struct ethtool_ops enetc_pf_ethtool_ops;
extern const struct ethtool_ops enetc4_pf_ethtool_ops;
@@ -493,15 +536,19 @@ void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si);
+void enetc4_teardown_cbdr(struct enetc_si *si);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index);
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count);
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count);
static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
struct enetc_cbd *cbd,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
new file mode 100644
index 000000000000..1b1591dce73d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright 2025 NXP */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+
+#include "enetc_pf.h"
+#include "enetc4_debugfs.h"
+
+static void enetc_show_si_mac_hash_filter(struct seq_file *s, int i)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ u32 hash_h, hash_l;
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIUMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIUMHFR1(i));
+ seq_printf(s, "SI %d unicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+
+ hash_l = enetc_port_rd(hw, ENETC4_PSIMMHFR0(i));
+ hash_h = enetc_port_rd(hw, ENETC4_PSIMMHFR1(i));
+ seq_printf(s, "SI %d multicast MAC hash filter: 0x%08x%08x\n",
+ i, hash_h, hash_l);
+}
+
+static int enetc_mac_filter_show(struct seq_file *s, void *data)
+{
+ struct enetc_si *si = s->private;
+ struct enetc_hw *hw = &si->hw;
+ struct maft_entry_data maft;
+ struct enetc_pf *pf;
+ int i, err, num_si;
+ u32 val;
+
+ pf = enetc_si_priv(si);
+ num_si = pf->caps.num_vsi + 1;
+
+ val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+ for (i = 0; i < num_si; i++) {
+ seq_printf(s, "SI %d Unicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_UP(i) & val));
+ seq_printf(s, "SI %d Multicast Promiscuous mode: %s\n", i,
+ str_enabled_disabled(PSIPMMR_SI_MAC_MP(i) & val));
+ }
+
+ /* MAC hash filter table */
+ for (i = 0; i < num_si; i++)
+ enetc_show_si_mac_hash_filter(s, i);
+
+ if (!pf->num_mfe)
+ return 0;
+
+ /* MAC address filter table */
+ seq_puts(s, "MAC address filter table\n");
+ for (i = 0; i < pf->num_mfe; i++) {
+ memset(&maft, 0, sizeof(maft));
+ err = ntmp_maft_query_entry(&si->ntmp_user, i, &maft);
+ if (err)
+ return err;
+
+ seq_printf(s, "Entry %d, MAC: %pM, SI bitmap: 0x%04x\n", i,
+ maft.keye.mac_addr, le16_to_cpu(maft.cfge.si_bitmap));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(enetc_mac_filter);
+
+void enetc_create_debugfs(struct enetc_si *si)
+{
+ struct net_device *ndev = si->ndev;
+ struct dentry *root;
+
+ root = debugfs_create_dir(netdev_name(ndev), NULL);
+ if (IS_ERR(root))
+ return;
+
+ si->debugfs_root = root;
+
+ debugfs_create_file("mac_filter", 0444, root, si, &enetc_mac_filter_fops);
+}
+
+void enetc_remove_debugfs(struct enetc_si *si)
+{
+ debugfs_remove(si->debugfs_root);
+ si->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
new file mode 100644
index 000000000000..96caca35f79d
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+
+#ifndef __ENETC4_DEBUGFS_H
+#define __ENETC4_DEBUGFS_H
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+void enetc_create_debugfs(struct enetc_si *si);
+void enetc_remove_debugfs(struct enetc_si *si);
+#else
+static inline void enetc_create_debugfs(struct enetc_si *si)
+{
+}
+
+static inline void enetc_remove_debugfs(struct enetc_si *si)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index 695cb07c74bc..aa25b445d301 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -99,6 +99,18 @@
#define ENETC4_PSICFGR2(a) ((a) * 0x80 + 0x2018)
#define PSICFGR2_NUM_MSIX GENMASK(5, 0)
+/* Port station interface a unicast MAC hash filter register 0/1 */
+#define ENETC4_PSIUMHFR0(a) ((a) * 0x80 + 0x2050)
+#define ENETC4_PSIUMHFR1(a) ((a) * 0x80 + 0x2054)
+
+/* Port station interface a multicast MAC hash filter register 0/1 */
+#define ENETC4_PSIMMHFR0(a) ((a) * 0x80 + 0x2058)
+#define ENETC4_PSIMMHFR1(a) ((a) * 0x80 + 0x205c)
+
+/* Port station interface a VLAN hash filter register 0/1 */
+#define ENETC4_PSIVHFR0(a) ((a) * 0x80 + 0x2060)
+#define ENETC4_PSIVHFR1(a) ((a) * 0x80 + 0x2064)
+
#define ENETC4_PMCAPR 0x4004
#define PMCAPR_HD BIT(8)
#define PMCAPR_FP GENMASK(10, 9)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index 73ac8c6afb3a..b3dc1afeefd1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -8,9 +8,19 @@
#include <linux/unaligned.h>
#include "enetc_pf_common.h"
+#include "enetc4_debugfs.h"
#define ENETC_SI_MAX_RING_NUM 8
+#define ENETC_MAC_FILTER_TYPE_UC BIT(0)
+#define ENETC_MAC_FILTER_TYPE_MC BIT(1)
+#define ENETC_MAC_FILTER_TYPE_ALL (ENETC_MAC_FILTER_TYPE_UC | \
+ ENETC_MAC_FILTER_TYPE_MC)
+
+struct enetc_mac_addr {
+ u8 addr[ETH_ALEN];
+};
+
static void enetc4_get_port_caps(struct enetc_pf *pf)
{
struct enetc_hw *hw = &pf->si->hw;
@@ -26,6 +36,9 @@ static void enetc4_get_port_caps(struct enetc_pf *pf)
val = enetc_port_rd(hw, ENETC4_PMCAPR);
pf->caps.half_duplex = (val & PMCAPR_HD) ? 1 : 0;
+
+ val = enetc_port_rd(hw, ENETC4_PSIMAFCAPR);
+ pf->caps.mac_filter_num = val & PSIMAFCAPR_NUM_MAC_AFTE;
}
static void enetc4_pf_set_si_primary_mac(struct enetc_hw *hw, int si,
@@ -56,6 +69,200 @@ static void enetc4_pf_get_si_primary_mac(struct enetc_hw *hw, int si,
put_unaligned_le16(lower, addr + 4);
}
+static void enetc4_pf_set_si_mac_promisc(struct enetc_hw *hw, int si,
+ bool uc_promisc, bool mc_promisc)
+{
+ u32 val = enetc_port_rd(hw, ENETC4_PSIPMMR);
+
+ if (uc_promisc)
+ val |= PSIPMMR_SI_MAC_UP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_UP(si);
+
+ if (mc_promisc)
+ val |= PSIPMMR_SI_MAC_MP(si);
+ else
+ val &= ~PSIPMMR_SI_MAC_MP(si);
+
+ enetc_port_wr(hw, ENETC4_PSIPMMR, val);
+}
+
+static void enetc4_pf_set_si_uc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIUMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIUMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_si_mc_hash_filter(struct enetc_hw *hw, int si,
+ u64 hash)
+{
+ enetc_port_wr(hw, ENETC4_PSIMMHFR0(si), lower_32_bits(hash));
+ enetc_port_wr(hw, ENETC4_PSIMMHFR1(si), upper_32_bits(hash));
+}
+
+static void enetc4_pf_set_loopback(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ u32 val;
+
+ val = enetc_port_mac_rd(si, ENETC4_PM_CMD_CFG(0));
+ val = u32_replace_bits(val, en ? 1 : 0, PM_CMD_CFG_LOOP_EN);
+ /* Default to MAC-level loopback mode when loopback is enabled. */
+ val = u32_replace_bits(val, en ? LPBCK_MODE_MAC_LEVEL : 0,
+ PM_CMD_CFG_LPBK_MODE);
+
+ enetc_port_mac_wr(si, ENETC4_PM_CMD_CFG(0), val);
+}
+
+static void enetc4_pf_clear_maft_entries(struct enetc_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_mfe; i++)
+ ntmp_maft_delete_entry(&pf->si->ntmp_user, i);
+
+ pf->num_mfe = 0;
+}
+
+static int enetc4_pf_add_maft_entries(struct enetc_pf *pf,
+ struct enetc_mac_addr *mac,
+ int mac_cnt)
+{
+ struct maft_entry_data maft = {};
+ u16 si_bit = BIT(0);
+ int i, err;
+
+ maft.cfge.si_bitmap = cpu_to_le16(si_bit);
+ for (i = 0; i < mac_cnt; i++) {
+ ether_addr_copy(maft.keye.mac_addr, mac[i].addr);
+ err = ntmp_maft_add_entry(&pf->si->ntmp_user, i, &maft);
+ if (unlikely(err)) {
+ pf->num_mfe = i;
+ goto clear_maft_entries;
+ }
+ }
+
+ pf->num_mfe = mac_cnt;
+
+ return 0;
+
+clear_maft_entries:
+ enetc4_pf_clear_maft_entries(pf);
+
+ return err;
+}
+
+static int enetc4_pf_set_uc_exact_filter(struct enetc_pf *pf)
+{
+ int max_num_mfe = pf->caps.mac_filter_num;
+ struct enetc_mac_filter mac_filter = {};
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_mac_addr *mac_tbl;
+ struct netdev_hw_addr *ha;
+ int i = 0, err;
+ int mac_cnt;
+
+ netif_addr_lock_bh(ndev);
+
+ mac_cnt = netdev_uc_count(ndev);
+ if (!mac_cnt) {
+ netif_addr_unlock_bh(ndev);
+ /* clear both MAC hash and exact filters */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+
+ return 0;
+ }
+
+ if (mac_cnt > max_num_mfe) {
+ err = -ENOSPC;
+ goto unlock_netif_addr;
+ }
+
+ mac_tbl = kcalloc(mac_cnt, sizeof(*mac_tbl), GFP_ATOMIC);
+ if (!mac_tbl) {
+ err = -ENOMEM;
+ goto unlock_netif_addr;
+ }
+
+ netdev_for_each_uc_addr(ha, ndev) {
+ enetc_add_mac_addr_ht_filter(&mac_filter, ha->addr);
+ ether_addr_copy(mac_tbl[i++].addr, ha->addr);
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Install a temporary unicast hash filter to avoid Rx loss while
+ * the MAC address filter table is being updated.
+ */
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, *mac_filter.mac_hash_table);
+ enetc4_pf_clear_maft_entries(pf);
+
+ if (!enetc4_pf_add_maft_entries(pf, mac_tbl, i))
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+
+ kfree(mac_tbl);
+
+ return 0;
+
+unlock_netif_addr:
+ netif_addr_unlock_bh(ndev);
+
+ return err;
+}
+
+static void enetc4_pf_set_mac_hash_filter(struct enetc_pf *pf, int type)
+{
+ struct net_device *ndev = pf->si->ndev;
+ struct enetc_mac_filter *mac_filter;
+ struct enetc_hw *hw = &pf->si->hw;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(ndev);
+ if (type & ENETC_MAC_FILTER_TYPE_UC) {
+ mac_filter = &pf->mac_filter[UC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_uc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_uc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC) {
+ mac_filter = &pf->mac_filter[MC];
+ enetc_reset_mac_addr_filter(mac_filter);
+ netdev_for_each_mc_addr(ha, ndev)
+ enetc_add_mac_addr_ht_filter(mac_filter, ha->addr);
+
+ enetc4_pf_set_si_mc_hash_filter(hw, 0,
+ *mac_filter->mac_hash_table);
+ }
+ netif_addr_unlock_bh(ndev);
+}
+
+static void enetc4_pf_set_mac_filter(struct enetc_pf *pf, int type)
+{
+ /* Currently the MAC address filter table (MAFT) has only 4 entries,
+ * while the default network configuration installs several multicast
+ * addresses for filtering, so the MAFT is suitable only for unicast
+ * filtering. If the number of unicast addresses exceeds the table
+ * capacity, fall back to the MAC hash filter.
+ */
+ if (type & ENETC_MAC_FILTER_TYPE_UC && enetc4_pf_set_uc_exact_filter(pf)) {
+ /* Fall back to the MAC hash filter */
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_UC);
+ /* Clear the old MAC exact filter */
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (type & ENETC_MAC_FILTER_TYPE_MC)
+ enetc4_pf_set_mac_hash_filter(pf, ENETC_MAC_FILTER_TYPE_MC);
+}
+
static const struct enetc_pf_ops enetc4_pf_ops = {
.set_si_primary_mac = enetc4_pf_set_si_primary_mac,
.get_si_primary_mac = enetc4_pf_get_si_primary_mac,
@@ -226,24 +433,6 @@ static void enetc4_set_trx_frame_size(struct enetc_pf *pf)
enetc4_pf_reset_tc_msdu(&si->hw);
}
-static void enetc4_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
-{
- int i;
-
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC4_PRSSKR(i), ((u32 *)bytes)[i]);
-}
-
-static void enetc4_set_default_rss_key(struct enetc_pf *pf)
-{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
- struct enetc_hw *hw = &pf->si->hw;
-
- /* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc4_set_rss_key(hw, hash_key);
-}
-
static void enetc4_enable_trx(struct enetc_pf *pf)
{
struct enetc_hw *hw = &pf->si->hw;
@@ -256,10 +445,25 @@ static void enetc4_configure_port(struct enetc_pf *pf)
{
enetc4_configure_port_si(pf);
enetc4_set_trx_frame_size(pf);
- enetc4_set_default_rss_key(pf);
+ enetc_set_default_rss_key(pf);
enetc4_enable_trx(pf);
}
+static int enetc4_init_ntmp_user(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ /* For ENETC 4.1, all table versions are 0 */
+ memset(&user->tbl, 0, sizeof(user->tbl));
+
+ return enetc4_setup_cbdr(si);
+}
+
+static void enetc4_free_ntmp_user(struct enetc_si *si)
+{
+ enetc4_teardown_cbdr(si);
+}
+
static int enetc4_pf_init(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
@@ -272,17 +476,99 @@ static int enetc4_pf_init(struct enetc_pf *pf)
return err;
}
+ err = enetc4_init_ntmp_user(pf->si);
+ if (err) {
+ dev_err(dev, "Failed to init CBDR\n");
+ return err;
+ }
+
enetc4_configure_port(pf);
return 0;
}
+static void enetc4_pf_free(struct enetc_pf *pf)
+{
+ enetc4_free_ntmp_user(pf->si);
+}
+
+static void enetc4_psi_do_set_rx_mode(struct work_struct *work)
+{
+ struct enetc_si *si = container_of(work, struct enetc_si, rx_mode_task);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ struct net_device *ndev = si->ndev;
+ struct enetc_hw *hw = &si->hw;
+ bool uc_promisc = false;
+ bool mc_promisc = false;
+ int type = 0;
+
+ rtnl_lock();
+
+ if (ndev->flags & IFF_PROMISC) {
+ uc_promisc = true;
+ mc_promisc = true;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ mc_promisc = true;
+ type = ENETC_MAC_FILTER_TYPE_UC;
+ } else {
+ type = ENETC_MAC_FILTER_TYPE_ALL;
+ }
+
+ enetc4_pf_set_si_mac_promisc(hw, 0, uc_promisc, mc_promisc);
+
+ if (uc_promisc) {
+ enetc4_pf_set_si_uc_hash_filter(hw, 0, 0);
+ enetc4_pf_clear_maft_entries(pf);
+ }
+
+ if (mc_promisc)
+ enetc4_pf_set_si_mc_hash_filter(hw, 0, 0);
+
+ /* Set new MAC filter */
+ enetc4_pf_set_mac_filter(pf, type);
+
+ rtnl_unlock();
+}
+
+static void enetc4_pf_set_rx_mode(struct net_device *ndev)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ queue_work(si->workqueue, &si->rx_mode_task);
+}
+
+static int enetc4_pf_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ bool promisc_en = !(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+
+ enetc4_pf_set_si_vlan_promisc(hw, 0, promisc_en);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ enetc4_pf_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
+
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
static const struct net_device_ops enetc4_ndev_ops = {
.ndo_open = enetc_open,
.ndo_stop = enetc_close,
.ndo_start_xmit = enetc_xmit,
.ndo_get_stats = enetc_get_stats,
.ndo_set_mac_address = enetc_pf_set_mac_addr,
+ .ndo_set_rx_mode = enetc4_pf_set_rx_mode,
+ .ndo_set_features = enetc4_pf_set_features,
+ .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
};
static struct phylink_pcs *
@@ -617,6 +903,19 @@ static void enetc4_link_deinit(struct enetc_ndev_priv *priv)
enetc_mdiobus_destroy(pf);
}
+static int enetc4_psi_wq_task_init(struct enetc_si *si)
+{
+ char wq_name[24];
+
+ INIT_WORK(&si->rx_mode_task, enetc4_psi_do_set_rx_mode);
+ snprintf(wq_name, sizeof(wq_name), "enetc-%s", pci_name(si->pdev));
+ si->workqueue = create_singlethread_workqueue(wq_name);
+ if (!si->workqueue)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int enetc4_pf_netdev_create(struct enetc_si *si)
{
struct device *dev = &si->pdev->dev;
@@ -657,6 +956,12 @@ static int enetc4_pf_netdev_create(struct enetc_si *si)
if (err)
goto err_link_init;
+ err = enetc4_psi_wq_task_init(si);
+ if (err) {
+ dev_err(dev, "Failed to init workqueue\n");
+ goto err_wq_init;
+ }
+
err = register_netdev(ndev);
if (err) {
dev_err(dev, "Failed to register netdev\n");
@@ -666,6 +971,8 @@ static int enetc4_pf_netdev_create(struct enetc_si *si)
return 0;
err_reg_netdev:
+ destroy_workqueue(si->workqueue);
+err_wq_init:
enetc4_link_deinit(priv);
err_link_init:
enetc_free_msix(priv);
@@ -683,11 +990,18 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si)
struct net_device *ndev = si->ndev;
unregister_netdev(ndev);
+ cancel_work(&si->rx_mode_task);
+ destroy_workqueue(si->workqueue);
enetc4_link_deinit(priv);
enetc_free_msix(priv);
free_netdev(ndev);
}
+static const struct enetc_si_ops enetc4_psi_ops = {
+ .get_rss_table = enetc4_get_rss_table,
+ .set_rss_table = enetc4_set_rss_table,
+};
+
static int enetc4_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -712,6 +1026,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
"Couldn't map PF only space\n");
si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc4_psi_ops;
err = enetc_get_driver_data(si);
if (err)
return dev_err_probe(dev, err,
@@ -728,14 +1043,28 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
enetc_get_si_caps(si);
- return enetc4_pf_netdev_create(si);
+ err = enetc4_pf_netdev_create(si);
+ if (err)
+ goto err_netdev_create;
+
+ enetc_create_debugfs(si);
+
+ return 0;
+
+err_netdev_create:
+ enetc4_pf_free(pf);
+
+ return err;
}
static void enetc4_pf_remove(struct pci_dev *pdev)
{
struct enetc_si *si = pci_get_drvdata(pdev);
+ struct enetc_pf *pf = enetc_si_priv(si);
+ enetc_remove_debugfs(si);
enetc4_pf_netdev_destroy(si);
+ enetc4_pf_free(pf);
}
static const struct pci_device_id enetc4_pf_id_table[] = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 20bfdf7fb4b4..3d5f31879d5c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -60,6 +60,44 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
+int enetc4_setup_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+ struct device *dev = &si->pdev->dev;
+ struct enetc_hw *hw = &si->hw;
+ struct netc_cbdr_regs regs;
+
+ user->cbdr_num = 1;
+ user->dev = dev;
+ user->ring = devm_kcalloc(dev, user->cbdr_num,
+ sizeof(struct netc_cbdr), GFP_KERNEL);
+ if (!user->ring)
+ return -ENOMEM;
+
+ /* set CBDR cache attributes */
+ enetc_wr(hw, ENETC_SICAR2,
+ ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
+
+ regs.pir = hw->reg + ENETC_SICBDRPIR;
+ regs.cir = hw->reg + ENETC_SICBDRCIR;
+ regs.mr = hw->reg + ENETC_SICBDRMR;
+ regs.bar0 = hw->reg + ENETC_SICBDRBAR0;
+ regs.bar1 = hw->reg + ENETC_SICBDRBAR1;
+ regs.lenr = hw->reg + ENETC_SICBDRLENR;
+
+ return ntmp_init_cbdr(user->ring, dev, &regs);
+}
+EXPORT_SYMBOL_GPL(enetc4_setup_cbdr);
+
+void enetc4_teardown_cbdr(struct enetc_si *si)
+{
+ struct ntmp_user *user = &si->ntmp_user;
+
+ ntmp_free_cbdr(user->ring);
+ user->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr);
+
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
struct enetc_cbd *dest_cbd;
@@ -256,3 +294,15 @@ int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);
+
+int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
+{
+ return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_get_rss_table);
+
+int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
+{
+ return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
+}
+EXPORT_SYMBOL_GPL(enetc4_set_rss_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index ece3ae28ba82..d38cd36be4a6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -141,7 +141,7 @@ static const struct {
static const struct {
int reg;
- char name[ETH_GSTRING_LEN];
+ char name[ETH_GSTRING_LEN] __nonstring;
} enetc_port_counters[] = {
{ ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
{ ENETC_PM_RALN(0), "MAC rx alignment errors" },
@@ -264,7 +264,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
- ethtool_puts(&data, enetc_port_counters[i].name);
+ ethtool_cpy(&data, enetc_port_counters[i].name);
break;
}
@@ -625,6 +625,29 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
return 0;
}
+/* i.MX95 ENETC does not support the RFS table, but the ingress port
+ * filter table can be used to implement a Wake-on-LAN filter or to
+ * drop matched flows, so the implementation differs from enetc_get_rxnfc()
+ * and enetc_set_rxnfc(). Therefore, add enetc4_get_rxnfc() for the
+ * ENETC v4 PF.
+ */
+static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
+ u32 *rule_locs)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->num_rx_rings;
+ break;
+ case ETHTOOL_GRXFH:
+ return enetc_get_rsshash(rxnfc);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -677,36 +700,53 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
+static int enetc_get_rss_key_base(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return ENETC_PRSSK(0);
+
+ return ENETC4_PRSSKR(0);
+}
+
+static void enetc_get_rss_key(struct enetc_si *si, u8 *key)
+{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
+ int i;
+
+ for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
+ ((u32 *)key)[i] = enetc_port_rd(hw, base + i * 4);
+}
+
static int enetc_get_rxfh(struct net_device *ndev,
struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- int err = 0, i;
+ struct enetc_si *si = priv->si;
+ int err = 0;
/* return hash function */
rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
- if (rxfh->key && hw->port)
- for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- ((u32 *)rxfh->key)[i] = enetc_port_rd(hw,
- ENETC_PRSSK(i));
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_get_rss_key(si, rxfh->key);
/* return RSS table */
if (rxfh->indir)
- err = enetc_get_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->get_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
-void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
+void enetc_set_rss_key(struct enetc_si *si, const u8 *bytes)
{
+ int base = enetc_get_rss_key_base(si);
+ struct enetc_hw *hw = &si->hw;
int i;
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
+ enetc_port_wr(hw, base + i * 4, ((u32 *)bytes)[i]);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);
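As a worked example of the word-wise layout above: assuming ENETC_RSSHASH_KEY_SIZE is 40 bytes (the usual Toeplitz key length; the value is not shown in this hunk), the loop issues ten 32-bit writes at base, base + 4, ..., base + 36. A small runnable mirror of the same walk:

/* Illustration only: mirrors the loop in enetc_set_rss_key(). The
 * 40-byte key length and the 4-byte register stride are assumptions
 * taken from the code above; memcpy() sidesteps the u8-to-u32 cast.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t key[40] = { 0x6d, 0x5a, 0x56, 0xda }; /* rest zero */
	uint32_t word;
	int i;

	for (i = 0; i < (int)sizeof(key) / 4; i++) {
		memcpy(&word, &key[i * 4], sizeof(word));
		printf("key reg at base + %2d <= 0x%08x\n", i * 4, word);
	}
	return 0;
}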
@@ -715,17 +755,16 @@ static int enetc_set_rxfh(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
int err = 0;
/* set hash key, if PF */
- if (rxfh->key && hw->port)
- enetc_set_rss_key(hw, rxfh->key);
+ if (rxfh->key && enetc_si_is_pf(si))
+ enetc_set_rss_key(si, rxfh->key);
/* set RSS table */
if (rxfh->indir)
- err = enetc_set_rss_table(priv->si, rxfh->indir,
- priv->si->num_rss);
+ err = si->ops->set_rss_table(si, rxfh->indir, si->num_rss);
return err;
}
@@ -1240,6 +1279,11 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = {
.set_wol = enetc_set_wol,
.get_pauseparam = enetc_get_pauseparam,
.set_pauseparam = enetc_set_pauseparam,
+ .get_rxnfc = enetc4_get_rxnfc,
+ .get_rxfh_key_size = enetc_get_rxfh_key_size,
+ .get_rxfh_indir_size = enetc_get_rxfh_indir_size,
+ .get_rxfh = enetc_get_rxfh,
+ .set_rxfh = enetc_set_rxfh,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 203862ec1114..f63a29e2e031 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -72,30 +72,6 @@ static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos)
enetc_port_wr(hw, ENETC_PSIVLANR(si), val);
}
-static int enetc_mac_addr_hash_idx(const u8 *addr)
-{
- u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16;
- u64 mask = 0;
- int res = 0;
- int i;
-
- for (i = 0; i < 8; i++)
- mask |= BIT_ULL(i * 6);
-
- for (i = 0; i < 6; i++)
- res |= (hweight64(fold & (mask << i)) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter)
-{
- filter->mac_addr_cnt = 0;
-
- bitmap_zero(filter->mac_hash_table,
- ENETC_MADDR_HASH_TBL_SZ);
-}
-
static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
const unsigned char *addr)
{
@@ -104,16 +80,6 @@ static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter,
filter->mac_addr_cnt++;
}
-static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter,
- const unsigned char *addr)
-{
- int idx = enetc_mac_addr_hash_idx(addr);
-
- /* add hash table entry */
- __set_bit(idx, filter->mac_hash_table);
- filter->mac_addr_cnt++;
-}
-
static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type)
{
bool err = si->errata & ENETC_ERR_UCMCSWP;
@@ -250,67 +216,6 @@ static void enetc_pf_set_rx_mode(struct net_device *ndev)
enetc_port_wr(hw, ENETC_PSIPMR, psipmr);
}
-static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx,
- unsigned long hash)
-{
- enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), lower_32_bits(hash));
- enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), upper_32_bits(hash));
-}
-
-static int enetc_vid_hash_idx(unsigned int vid)
-{
- int res = 0;
- int i;
-
- for (i = 0; i < 6; i++)
- res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
-
- return res;
-}
-
-static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash)
-{
- int i;
-
- if (rehash) {
- bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
-
- for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
- int hidx = enetc_vid_hash_idx(i);
-
- __set_bit(hidx, pf->vlan_ht_filter);
- }
- }
-
- enetc_set_vlan_ht_filter(&pf->si->hw, 0, *pf->vlan_ht_filter);
-}
-
-static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
- int idx;
-
- __set_bit(vid, pf->active_vlans);
-
- idx = enetc_vid_hash_idx(vid);
- if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
- enetc_sync_vlan_ht_filter(pf, false);
-
- return 0;
-}
-
-static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_pf *pf = enetc_si_priv(priv->si);
-
- __clear_bit(vid, pf->active_vlans);
- enetc_sync_vlan_ht_filter(pf, true);
-
- return 0;
-}
-
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -549,7 +454,6 @@ static void enetc_mac_enable(struct enetc_si *si, bool en)
static void enetc_configure_port(struct enetc_pf *pf)
{
- u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
enetc_configure_port_mac(pf->si);
@@ -557,8 +461,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
enetc_port_si_configure(pf->si);
/* set up hash key */
- get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
- enetc_set_rss_key(hw, hash_key);
+ enetc_set_default_rss_key(pf);
/* split up RFS entries */
enetc_port_assign_rfs_entries(pf->si);
@@ -728,6 +631,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static struct phylink_pcs *
@@ -939,6 +844,11 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
return enetc_ierb_register_pf(ierb_pdev, pdev);
}
+static const struct enetc_si_ops enetc_psi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
{
struct enetc_si *si;
@@ -958,6 +868,7 @@ static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
}
si->revision = enetc_get_ip_revision(&si->hw);
+ si->ops = &enetc_psi_ops;
err = enetc_get_driver_data(si);
if (err) {
dev_err(&pdev->dev, "Could not get PF driver data\n");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index a26a12863855..ae407e9e9ee7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -5,19 +5,8 @@
#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
-
-enum enetc_mac_addr_type {UC, MC, MADDR_TYPE};
#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE)
-#define ENETC_MADDR_HASH_TBL_SZ 64
-struct enetc_mac_filter {
- union {
- char mac_addr[ETH_ALEN];
- DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ);
- };
- int mac_addr_cnt;
-};
-
#define ENETC_VLAN_HT_SIZE 64
enum enetc_vf_flags {
@@ -34,6 +23,7 @@ struct enetc_port_caps {
int num_msix;
int num_rx_bdr;
int num_tx_bdr;
+ int mac_filter_num;
};
struct enetc_pf;
@@ -71,6 +61,8 @@ struct enetc_pf {
struct enetc_port_caps caps;
const struct enetc_pf_ops *ops;
+
+ int num_mfe; /* number of mac address filter table entries */
};
#define phylink_to_enetc_pf(config) \
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
index 3fd9b0727875..edf14a95cab7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -128,14 +128,14 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
if (si->hw_features & ENETC_SI_F_LSO)
priv->active_offloads |= ENETC_F_LSO;
- /* TODO: currently, i.MX95 ENETC driver does not support advanced features */
- if (!is_enetc_rev1(si)) {
- ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK);
- goto end;
+ if (si->num_rss) {
+ ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
}
- if (si->num_rss)
- ndev->hw_features |= NETIF_F_RXHASH;
+ /* TODO: currently, the i.MX95 ENETC driver does not support advanced features */
+ if (!is_enetc_rev1(si))
+ goto end;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
@@ -341,5 +341,86 @@ void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
}
EXPORT_SYMBOL_GPL(enetc_phylink_destroy);
+void enetc_set_default_rss_key(struct enetc_pf *pf)
+{
+ u8 hash_key[ENETC_RSSHASH_KEY_SIZE] = {0};
+
+ /* set up hash key */
+ get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE);
+ enetc_set_rss_key(pf->si, hash_key);
+}
+EXPORT_SYMBOL_GPL(enetc_set_default_rss_key);
+
+static int enetc_vid_hash_idx(unsigned int vid)
+{
+ int res = 0;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i;
+
+ return res;
+}
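The fold above sets bit i of the result to the parity (XOR) of bits i and i + 6 of the 12-bit VID, compressing 4096 VIDs into 64 hash buckets. A runnable userspace check with one worked value:

/* Userspace mirror of enetc_vid_hash_idx(); __builtin_popcount()
 * stands in for the kernel's hweight8().
 */
#include <assert.h>
#include <stdio.h>

static int vid_hash_idx(unsigned int vid)
{
	int res = 0, i;

	for (i = 0; i < 6; i++)
		res |= (__builtin_popcount(vid & ((1u << i) | (1u << (i + 6)))) & 0x1) << i;

	return res;
}

int main(void)
{
	/* VID 0x123 = 0b000100100011 -> result bits 0, 1, 2, 5 set -> 0x27 */
	assert(vid_hash_idx(0x123) == 0x27);
	printf("vid 0x123 -> bucket 0x%02x\n", vid_hash_idx(0x123));
	return 0;
}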
+
+static void enetc_refresh_vlan_ht_filter(struct enetc_pf *pf)
+{
+ int i;
+
+ bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE);
+ for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) {
+ int hidx = enetc_vid_hash_idx(i);
+
+ __set_bit(hidx, pf->vlan_ht_filter);
+ }
+}
+
+static void enetc_set_si_vlan_ht_filter(struct enetc_si *si,
+ int si_id, u64 hash)
+{
+ struct enetc_hw *hw = &si->hw;
+ int high_reg_off, low_reg_off;
+
+ if (is_enetc_rev1(si)) {
+ low_reg_off = ENETC_PSIVHFR0(si_id);
+ high_reg_off = ENETC_PSIVHFR1(si_id);
+ } else {
+ low_reg_off = ENETC4_PSIVHFR0(si_id);
+ high_reg_off = ENETC4_PSIVHFR1(si_id);
+ }
+
+ enetc_port_wr(hw, low_reg_off, lower_32_bits(hash));
+ enetc_port_wr(hw, high_reg_off, upper_32_bits(hash));
+}
+
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+ int idx;
+
+ __set_bit(vid, pf->active_vlans);
+
+ idx = enetc_vid_hash_idx(vid);
+ if (!__test_and_set_bit(idx, pf->vlan_ht_filter))
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_add_vid);
+
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+ if (__test_and_clear_bit(vid, pf->active_vlans)) {
+ enetc_refresh_vlan_ht_filter(pf);
+ enetc_set_si_vlan_ht_filter(pf->si, 0, *pf->vlan_ht_filter);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_vlan_rx_del_vid);
+
MODULE_DESCRIPTION("NXP ENETC PF common functionality driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
index 48f55ee743ad..96d4840a3107 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.h
@@ -12,6 +12,9 @@ void enetc_mdiobus_destroy(struct enetc_pf *pf);
int enetc_phylink_create(struct enetc_ndev_priv *priv, struct device_node *node,
const struct phylink_mac_ops *ops);
void enetc_phylink_destroy(struct enetc_ndev_priv *priv);
+void enetc_set_default_rss_key(struct enetc_pf *pf);
+int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid);
+int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid);
static inline u16 enetc_get_ip_revision(struct enetc_hw *hw)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 3768752b6008..6c4b374bcb0e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -121,6 +121,8 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
.ndo_setup_tc = enetc_vf_setup_tc,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
@@ -155,13 +157,20 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- if (si->num_rss)
+ if (si->num_rss) {
ndev->hw_features |= NETIF_F_RXHASH;
+ ndev->features |= NETIF_F_RXHASH;
+ }
/* pick up primary MAC address from SI */
enetc_load_primary_mac_addr(&si->hw, ndev);
}
+static const struct enetc_si_ops enetc_vsi_ops = {
+ .get_rss_table = enetc_get_rss_table,
+ .set_rss_table = enetc_set_rss_table,
+};
+
static int enetc_vf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -176,6 +185,7 @@ static int enetc_vf_probe(struct pci_dev *pdev,
si = pci_get_drvdata(pdev);
si->revision = ENETC_REV_1_0;
+ si->ops = &enetc_vsi_ops;
err = enetc_get_driver_data(si);
if (err) {
dev_err_probe(&pdev->dev, err,
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
new file mode 100644
index 000000000000..ba32c1bbd9e1
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NETC NTMP (NETC Table Management Protocol) 2.0 Library
+ * Copyright 2025 NXP
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/iopoll.h>
+
+#include "ntmp_private.h"
+
+#define NETC_CBDR_TIMEOUT 1000 /* us */
+#define NETC_CBDR_DELAY_US 10
+#define NETC_CBDR_MR_EN BIT(31)
+
+#define NTMP_BASE_ADDR_ALIGN 128
+#define NTMP_DATA_ADDR_ALIGN 32
+
+/* Define NTMP Table ID */
+#define NTMP_MAFT_ID 1
+#define NTMP_RSST_ID 3
+
+/* Generic Update Actions for most tables */
+#define NTMP_GEN_UA_CFGEU BIT(0)
+#define NTMP_GEN_UA_STSEU BIT(1)
+
+#define NTMP_ENTRY_ID_SIZE 4
+#define RSST_ENTRY_NUM 64
+#define RSST_STSE_DATA_SIZE(n) ((n) * 8)
+#define RSST_CFGE_DATA_SIZE(n) (n)
+
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ int cbd_num = NETC_CBDR_BD_NUM;
+ size_t size;
+
+ size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
+ cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
+ GFP_KERNEL);
+ if (!cbdr->addr_base)
+ return -ENOMEM;
+
+ cbdr->dma_size = size;
+ cbdr->bd_num = cbd_num;
+ cbdr->regs = *regs;
+ cbdr->dev = dev;
+
+ /* The base address of the Control BD Ring must be 128-byte aligned */
+ cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
+ cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
+ NTMP_BASE_ADDR_ALIGN);
+
+ cbdr->next_to_clean = 0;
+ cbdr->next_to_use = 0;
+ spin_lock_init(&cbdr->ring_lock);
+
+ /* Step 1: Configure the base address of the Control BD Ring */
+ netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
+ netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
+
+ /* Step 2: Configure the producer index register */
+ netc_write(cbdr->regs.pir, cbdr->next_to_clean);
+
+ /* Step 3: Configure the consumer index register */
+ netc_write(cbdr->regs.cir, cbdr->next_to_use);
+
+ /* Step 4: Configure the number of BDs of the Control BD Ring */
+ netc_write(cbdr->regs.lenr, cbdr->bd_num);
+
+ /* Step 5: Enable the Control BD Ring */
+ netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ntmp_init_cbdr);
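One detail of ntmp_init_cbdr() worth spelling out: the ring is over-allocated by NTMP_BASE_ADDR_ALIGN (128) bytes so that the DMA handle and the CPU pointer can both be rounded up to the next 128-byte boundary. A minimal sketch of the idiom; it relies on the assumption, valid for dma_alloc_coherent() buffers (which are at least page aligned), that both addresses share the same offset within the alignment, so ALIGN() and PTR_ALIGN() advance them in lockstep:

/* Sketch only: over-allocate by the alignment, then round both views
 * of the buffer up by the same delta so they keep naming the same byte.
 * dma_free_coherent() must later be called with the original pair.
 */
void *cpu_base, *cpu_aligned;
dma_addr_t dma_base, dma_aligned;

cpu_base = dma_alloc_coherent(dev, size + NTMP_BASE_ADDR_ALIGN,
			      &dma_base, GFP_KERNEL);
if (!cpu_base)
	return -ENOMEM;

dma_aligned = ALIGN(dma_base, NTMP_BASE_ADDR_ALIGN);
cpu_aligned = cpu_base + (dma_aligned - dma_base);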
+
+void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+ /* Disable the Control BD Ring */
+ netc_write(cbdr->regs.mr, 0);
+ dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
+ cbdr->dma_base);
+ memset(cbdr, 0, sizeof(*cbdr));
+}
+EXPORT_SYMBOL_GPL(ntmp_free_cbdr);
+
+static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
+{
+ return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
+ cbdr->bd_num) % cbdr->bd_num;
+}
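ntmp_get_free_cbd_num() is the standard one-slot-reserved ring formula: with producer index next_to_use and consumer index next_to_clean, free = (clean - use - 1 + num) mod num, so a full ring is never confused with an empty one. A runnable check of the arithmetic:

/* Ring-occupancy sketch: equal indices mean empty; one slot is
 * sacrificed so that a full ring never looks empty.
 */
#include <assert.h>

static int free_slots(int clean, int use, int num)
{
	return (clean - use - 1 + num) % num;
}

int main(void)
{
	assert(free_slots(0, 0, 256) == 255);	/* empty ring */
	assert(free_slots(0, 255, 256) == 0);	/* full: 255 BDs in flight */
	assert(free_slots(5, 10, 256) == 250);	/* 5 BDs in flight */
	return 0;
}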
+
+static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
+{
+ return &((union netc_cbd *)(cbdr->addr_base_align))[index];
+}
+
+static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
+{
+ union netc_cbd *cbd;
+ int i;
+
+ i = cbdr->next_to_clean;
+ while (netc_read(cbdr->regs.cir) != i) {
+ cbd = ntmp_get_cbd(cbdr, i);
+ memset(cbd, 0, sizeof(*cbd));
+ i = (i + 1) % cbdr->bd_num;
+ }
+
+ cbdr->next_to_clean = i;
+}
+
+static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
+{
+ union netc_cbd *cur_cbd;
+ struct netc_cbdr *cbdr;
+ int i, err;
+ u16 status;
+ u32 val;
+
+ /* Currently only i.MX95 ENETC is supported, and it only has one
+ * command BD ring
+ */
+ cbdr = &user->ring[0];
+
+ spin_lock_bh(&cbdr->ring_lock);
+
+ if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
+ ntmp_clean_cbdr(cbdr);
+
+ i = cbdr->next_to_use;
+ cur_cbd = ntmp_get_cbd(cbdr, i);
+ *cur_cbd = *cbd;
+ dma_wmb();
+
+ /* Update producer index of both software and hardware */
+ i = (i + 1) % cbdr->bd_num;
+ cbdr->next_to_use = i;
+ netc_write(cbdr->regs.pir, i);
+
+ err = read_poll_timeout_atomic(netc_read, val, val == i,
+ NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
+ true, cbdr->regs.cir);
+ if (unlikely(err))
+ goto cbdr_unlock;
+
+ dma_rmb();
+ /* Copy the written-back command BD back, since the caller may
+ * need to check other fields of the response header.
+ */
+ *cbd = *cur_cbd;
+
+ /* Check the writeback error status */
+ status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
+ if (unlikely(status)) {
+ err = -EIO;
+ dev_err(user->dev, "Command BD error: 0x%04x\n", status);
+ }
+
+ ntmp_clean_cbdr(cbdr);
+ dma_wmb();
+
+cbdr_unlock:
+ spin_unlock_bh(&cbdr->ring_lock);
+
+ return err;
+}
+
+static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
+{
+ void *buf;
+
+ buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ &data->dma, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ data->buf = buf;
+ *buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
+
+ return 0;
+}
+
+static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
+{
+ dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
+ data->buf, data->dma);
+}
+
+static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
+ int len, int table_id, int cmd,
+ int access_method)
+{
+ dma_addr_t dma_align;
+
+ memset(cbd, 0, sizeof(*cbd));
+ dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
+ cbd->req_hdr.addr = cpu_to_le64(dma_align);
+ cbd->req_hdr.len = cpu_to_le32(len);
+ cbd->req_hdr.cmd = cmd;
+ cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
+ access_method);
+ cbd->req_hdr.table_id = table_id;
+ cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
+ NTMP_HDR_VER2);
+ /* For NTMP version 2.0 or later */
+ cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
+}
+
+static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
+ u8 qa, u16 ua)
+{
+ crd->update_act = cpu_to_le16(ua);
+ crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
+}
+
+static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
+ u8 qa, u16 ua, u32 entry_id)
+{
+ ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
+ rbe->entry_id = cpu_to_le32(entry_id);
+}
+
+static const char *ntmp_table_name(int tbl_id)
+{
+ switch (tbl_id) {
+ case NTMP_MAFT_ID:
+ return "MAC Address Filter Table";
+ case NTMP_RSST_ID:
+ return "RSS Table";
+ default:
+ return "Unknown Table";
+ }
+}
+
+static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u8 tbl_ver, u32 entry_id, u32 req_len,
+ u32 resp_len)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = max(req_len, resp_len),
+ };
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
+ tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev,
+ "Failed to delete entry 0x%x of %s, err: %pe",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+
+static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
+ u32 len, struct ntmp_req_by_eid *req,
+ dma_addr_t dma, bool compare_eid)
+{
+ struct ntmp_cmn_resp_query *resp;
+ int cmd = NTMP_CMD_QUERY;
+ union netc_cbd cbd;
+ u32 entry_id;
+ int err;
+
+ entry_id = le32_to_cpu(req->entry_id);
+ if (le16_to_cpu(req->crd.update_act))
+ cmd = NTMP_CMD_QU;
+
+ /* Request header */
+ ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev,
+ "Failed to query entry 0x%x of %s, err: %pe\n",
+ entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
+ return err;
+ }
+
+ /* For a few tables, the first field of the response data is not
+ * the entry ID, so return success directly.
+ */
+ if (!compare_eid)
+ return 0;
+
+ resp = (struct ntmp_cmn_resp_query *)req;
+ if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
+ dev_err(user->dev,
+ "%s: query EID 0x%x doesn't match response EID 0x%x\n",
+ ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_req_add),
+ };
+ struct maft_req_add *req;
+ union netc_cbd cbd;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Fill the MAC address filter table request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
+ req->keye = maft->keye;
+ req->cfge = maft->cfge;
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
+ entry_id, ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);
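Taken together with the query and delete helpers that follow, a hypothetical caller-side round trip might look as follows; 'user' is the &si->ntmp_user handle seen earlier, and the MAC address, entry ID 0 and BIT(0) SI bitmap are illustrative values mirroring how enetc4_pf_add_maft_entries() drives the API:

/* Hypothetical usage sketch; error handling trimmed for brevity. */
struct maft_entry_data maft = {};
const u8 mac[ETH_ALEN] = { 0x00, 0x04, 0x9f, 0x11, 0x22, 0x33 };
int err;

ether_addr_copy(maft.keye.mac_addr, mac);
maft.cfge.si_bitmap = cpu_to_le16(BIT(0));	/* steer to SI 0, the PF */

err = ntmp_maft_add_entry(user, 0, &maft);	/* install at entry ID 0 */
if (!err)
	err = ntmp_maft_query_entry(user, 0, &maft);	/* read it back */
if (!err)
	err = ntmp_maft_delete_entry(user, 0);	/* and remove it */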
+
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ struct ntmp_dma_buf data = {
+ .dev = user->dev,
+ .size = sizeof(struct maft_resp_query),
+ };
+ struct maft_resp_query *resp;
+ struct ntmp_req_by_eid *req;
+ int err;
+
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
+ err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
+ NTMP_LEN(sizeof(*req), data.size),
+ req, data.dma, true);
+ if (err)
+ goto end;
+
+ resp = (struct maft_resp_query *)req;
+ maft->keye = resp->keye;
+ maft->cfge = resp->cfge;
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);
+
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
+ entry_id, NTMP_EID_REQ_LEN, 0);
+}
+EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);
+
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct rsst_req_update *req;
+ union netc_cbd cbd;
+ int err, i;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only accepts a full 64-entry table */
+ return -EINVAL;
+
+ data.size = struct_size(req, groups, count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
+ NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
+ for (i = 0; i < count; i++)
+ req->groups[i] = (u8)(table[i]);
+
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
+
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err)
+ dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
+ ERR_PTR(err));
+
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);
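A quick check on the struct_size() arithmetic above: struct rsst_req_update (see ntmp_private.h later in this patch) is an 8-byte fixed header, a 4-byte common request descriptor plus a 4-byte entry ID, followed by the flexible groups[] array, so the full 64-entry update request occupies 72 bytes. A runnable mirror of the layout, assuming the natural little-endian packing with no padding:

/* Userspace mirror of the request layout from ntmp_private.h,
 * flattened here purely for the size check.
 */
#include <assert.h>
#include <stdint.h>

struct rsst_req_update {
	uint16_t update_act;	/* struct ntmp_cmn_req_data ... */
	uint8_t  dbg_opt;
	uint8_t  tblv_qact;
	uint32_t entry_id;
	uint8_t  groups[];
};

int main(void)
{
	assert(sizeof(struct rsst_req_update) == 8);
	/* struct_size(req, groups, 64) in the kernel evaluates to 72 */
	assert(sizeof(struct rsst_req_update) + 64 == 72);
	return 0;
}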
+
+int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
+{
+ struct ntmp_dma_buf data = {.dev = user->dev};
+ struct ntmp_req_by_eid *req;
+ union netc_cbd cbd;
+ int err, i;
+ u8 *group;
+
+ if (count != RSST_ENTRY_NUM)
+ /* HW only accepts a full 64-entry table */
+ return -EINVAL;
+
+ data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
+ RSST_CFGE_DATA_SIZE(count);
+ err = ntmp_alloc_data_mem(&data, (void **)&req);
+ if (err)
+ return err;
+
+ /* Set the request data buffer */
+ ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
+ ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
+ NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(user, &cbd);
+ if (err) {
+ dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
+ ERR_PTR(err));
+ goto end;
+ }
+
+ group = (u8 *)req;
+ group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
+ for (i = 0; i < count; i++)
+ table[i] = group[i];
+
+end:
+ ntmp_free_data_mem(&data);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);
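The buffer parsed at the end of ntmp_rsst_query_entry() is laid out as a 4-byte entry ID, then 8 bytes of STSE statistics per entry, then one CFGE byte per entry; for the 64-entry table, the group bytes therefore start at offset 516 of a 580-byte response. A worked check of that arithmetic:

/* Offset math mirroring NTMP_ENTRY_ID_SIZE, RSST_STSE_DATA_SIZE() and
 * RSST_CFGE_DATA_SIZE() for count == 64.
 */
#include <assert.h>

int main(void)
{
	int count = 64;
	int entry_id = 4;	/* NTMP_ENTRY_ID_SIZE */
	int stse = count * 8;	/* statistics element data */
	int cfge = count;	/* one group byte per entry */

	assert(entry_id + stse + cfge == 580);	/* data.size */
	assert(entry_id + stse == 516);		/* first group byte */
	return 0;
}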
+
+MODULE_DESCRIPTION("NXP NETC Library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
new file mode 100644
index 000000000000..34394e40fddd
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * NTMP table request and response data buffer formats
+ * Copyright 2025 NXP
+ */
+
+#ifndef __NTMP_PRIVATE_H
+#define __NTMP_PRIVATE_H
+
+#include <linux/bitfield.h>
+#include <linux/fsl/ntmp.h>
+
+#define NTMP_EID_REQ_LEN 8
+#define NETC_CBDR_BD_NUM 256
+
+union netc_cbd {
+ struct {
+ __le64 addr;
+ __le32 len;
+#define NTMP_RESP_LEN GENMASK(19, 0)
+#define NTMP_REQ_LEN GENMASK(31, 20)
+#define NTMP_LEN(req, resp) (FIELD_PREP(NTMP_REQ_LEN, (req)) | \
+ ((resp) & NTMP_RESP_LEN))
+ u8 cmd;
+#define NTMP_CMD_DELETE BIT(0)
+#define NTMP_CMD_UPDATE BIT(1)
+#define NTMP_CMD_QUERY BIT(2)
+#define NTMP_CMD_ADD BIT(3)
+#define NTMP_CMD_QU (NTMP_CMD_QUERY | NTMP_CMD_UPDATE)
+ u8 access_method;
+#define NTMP_ACCESS_METHOD GENMASK(7, 4)
+#define NTMP_AM_ENTRY_ID 0
+#define NTMP_AM_EXACT_KEY 1
+#define NTMP_AM_SEARCH 2
+#define NTMP_AM_TERNARY_KEY 3
+ u8 table_id;
+ u8 ver_cci_rr;
+#define NTMP_HDR_VERSION GENMASK(5, 0)
+#define NTMP_HDR_VER2 2
+#define NTMP_CCI BIT(6)
+#define NTMP_RR BIT(7)
+ __le32 resv[3];
+ __le32 npf;
+#define NTMP_NPF BIT(15)
+ } req_hdr; /* NTMP Request Message Header Format */
+
+ struct {
+ __le32 resv0[3];
+ __le16 num_matched;
+ __le16 error_rr;
+#define NTMP_RESP_ERROR GENMASK(11, 0)
+#define NTMP_RESP_RR BIT(15)
+ __le32 resv1[4];
+ } resp_hdr; /* NTMP Response Message Header Format */
+};
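The len word of the request header packs both transfer lengths into a single register: the request length in bits 31:20, the response length in bits 19:0. For instance, the 8-byte MAFT delete request with no response data encodes as NTMP_LEN(8, 0) == 0x00800000. A runnable mirror of the packing:

/* Userspace equivalent of NTMP_LEN(): FIELD_PREP into bits 31:20 for
 * the request, mask to bits 19:0 for the response.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t ntmp_len(uint32_t req, uint32_t resp)
{
	return (req << 20) | (resp & 0xfffff);
}

int main(void)
{
	assert(ntmp_len(8, 0) == 0x00800000);	/* MAFT delete */
	assert(ntmp_len(8, 580) == 0x00800244);	/* 64-entry RSST query */
	return 0;
}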
+
+struct ntmp_dma_buf {
+ struct device *dev;
+ size_t size;
+ void *buf;
+ dma_addr_t dma;
+};
+
+struct ntmp_cmn_req_data {
+ __le16 update_act;
+ u8 dbg_opt;
+ u8 tblv_qact;
+#define NTMP_QUERY_ACT GENMASK(3, 0)
+#define NTMP_TBL_VER GENMASK(7, 4)
+#define NTMP_TBLV_QACT(v, a) (FIELD_PREP(NTMP_TBL_VER, (v)) | \
+ ((a) & NTMP_QUERY_ACT))
+};
+
+struct ntmp_cmn_resp_query {
+ __le32 entry_id;
+};
+
+/* Generic structure for request data by entry ID */
+struct ntmp_req_by_eid {
+ struct ntmp_cmn_req_data crd;
+ __le32 entry_id;
+};
+
+/* MAC Address Filter Table Request Data Buffer Format of Add action */
+struct maft_req_add {
+ struct ntmp_req_by_eid rbe;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* MAC Address Filter Table Response Data Buffer Format of Query action */
+struct maft_resp_query {
+ __le32 entry_id;
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+/* RSS Table Request Data Buffer Format of Update action */
+struct rsst_req_update {
+ struct ntmp_req_by_eid rbe;
+ u8 groups[];
+};
+
+#endif
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index deb35b38c976..bcbcad613512 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2061,15 +2061,13 @@ static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&priv->reset_task);
}
-static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
break;
@@ -2082,7 +2080,7 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
priv->hwts_rx_en = 0;
@@ -2096,44 +2094,23 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
priv->hwts_rx_en = 1;
reset_gfar(netdev);
}
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
-static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int gfar_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
- config.flags = 0;
- config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = (priv->hwts_rx_en ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct phy_device *phydev = dev->phydev;
+ config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cmd == SIOCSHWTSTAMP)
- return gfar_hwtstamp_set(dev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return gfar_hwtstamp_get(dev, rq);
-
- if (!phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(phydev, rq, cmd);
+ return 0;
}
/* Interrupt Handler for Transmit complete */
@@ -3174,7 +3151,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_features = gfar_set_features,
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
- .ndo_eth_ioctl = gfar_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
@@ -3182,6 +3159,8 @@ static const struct net_device_ops gfar_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
#endif
+ .ndo_hwtstamp_get = gfar_hwtstamp_get,
+ .ndo_hwtstamp_set = gfar_hwtstamp_set,
};
/* Set up the ethernet device structure, private data,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index eae1a7595a69..3c1da0cf3f61 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -67,7 +67,7 @@ static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
-static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array = {
"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
@@ -113,7 +113,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
i);
for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
- ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
+ ethtool_cpy(&s, gve_gstrings_adminq_stats[i]);
break;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index c3791cf23c87..e1ffbd561fac 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1830,7 +1830,7 @@ static void gve_turndown(struct gve_priv *priv)
/* Stop tx queues */
netif_tx_disable(priv->dev);
- xdp_features_clear_redirect_target(priv->dev);
+ xdp_features_clear_redirect_target_locked(priv->dev);
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
@@ -1902,7 +1902,7 @@ static void gve_turnup(struct gve_priv *priv)
}
if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
- xdp_features_set_redirect_target(priv->dev, false);
+ xdp_features_set_redirect_target_locked(priv->dev, false);
gve_set_napi_enabled(priv);
}
@@ -2185,7 +2185,7 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
xdp_features = 0;
}
- xdp_set_features_flag(priv->dev, xdp_features);
+ xdp_set_features_flag_locked(priv->dev, xdp_features);
}
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
@@ -2659,6 +2659,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto abort_with_wq;
+ if (!gve_is_gqi(priv) && !gve_is_qpl(priv))
+ dev->netmem_tx = true;
+
err = register_netdev(dev);
if (err)
goto abort_with_gve_init;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 2eba868d8037..a27f1574a733 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -660,7 +660,8 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
goto err;
dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
- dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), pkt,
+ dma[pkt->num_bufs], addr);
++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
@@ -1038,8 +1039,9 @@ static void gve_unmap_packet(struct device *dev,
dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
for (i = 1; i < pkt->num_bufs; i++) {
- dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE, 0);
}
pkt->num_bufs = 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index a0bcfb5a713d..ff3295b60a69 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -61,6 +61,8 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
return -EBUSY;
}
+ netif_device_detach(priv->netdev);
+
priv->reset_type = type;
set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
@@ -91,6 +93,8 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
return ret;
}
+ netif_device_attach(priv->netdev);
+
dev_info(&priv->pdev->dev, "reset done\n");
return ret;
}
@@ -117,16 +121,13 @@ void hbg_err_reset(struct hbg_priv *priv)
if (running)
dev_close(priv->netdev);
- hbg_reset(priv);
-
- /* in hbg_pci_err_detected(), we will detach first,
- * so we need to attach before open
- */
- if (!netif_device_present(priv->netdev))
- netif_device_attach(priv->netdev);
+ if (hbg_reset(priv))
+ goto err_unlock;
if (running)
dev_open(priv->netdev, NULL);
+
+err_unlock:
rtnl_unlock();
}
@@ -160,7 +161,6 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
hbg_err_reset(priv);
- netif_device_attach(netdev);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index 8f1107b85fbb..55520053270a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -317,6 +317,9 @@ static void hbg_update_stats_by_info(struct hbg_priv *priv,
const struct hbg_ethtool_stats *stats;
u32 i;
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
for (i = 0; i < info_len; i++) {
stats = &info[i];
if (!stats->reg)
diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig
index c05fce15eb51..7d0feb1da158 100644
--- a/drivers/net/ethernet/huawei/Kconfig
+++ b/drivers/net/ethernet/huawei/Kconfig
@@ -16,5 +16,6 @@ config NET_VENDOR_HUAWEI
if NET_VENDOR_HUAWEI
source "drivers/net/ethernet/huawei/hinic/Kconfig"
+source "drivers/net/ethernet/huawei/hinic3/Kconfig"
endif # NET_VENDOR_HUAWEI
diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile
index 2549ad5afe6d..59865b882879 100644
--- a/drivers/net/ethernet/huawei/Makefile
+++ b/drivers/net/ethernet/huawei/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_HINIC) += hinic/
+obj-$(CONFIG_HINIC3) += hinic3/
diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig
new file mode 100644
index 000000000000..ce4331d1387b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Huawei driver configuration
+#
+
+config HINIC3
+ tristate "Huawei 3rd generation network adapters (HINIC3) support"
+ # Fields of HW and management structures are little endian and are
+ # currently not converted
+ depends on !CPU_BIG_ENDIAN
+ depends on X86 || ARM64 || COMPILE_TEST
+ depends on PCI_MSI && 64BIT
+ select AUXILIARY_BUS
+ select PAGE_POOL
+ help
+ This driver supports the HiNIC 3rd generation Network Adapter
+ (HINIC3). The driver is supported on x86_64 and arm64 little-endian
+ platforms.
+
+ To compile this driver as a module, choose M here.
+ The module will be called hinic3.
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
new file mode 100644
index 000000000000..509dfbfb0e96
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+obj-$(CONFIG_HINIC3) += hinic3.o
+
+hinic3-objs := hinic3_common.o \
+ hinic3_hw_cfg.o \
+ hinic3_hw_comm.o \
+ hinic3_hwdev.o \
+ hinic3_hwif.o \
+ hinic3_irq.o \
+ hinic3_lld.o \
+ hinic3_main.o \
+ hinic3_mbox.o \
+ hinic3_netdev_ops.o \
+ hinic3_nic_cfg.o \
+ hinic3_nic_io.o \
+ hinic3_queue_common.o \
+ hinic3_rx.o \
+ hinic3_tx.o \
+ hinic3_wq.o
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
new file mode 100644
index 000000000000..0aa42068728c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include "hinic3_common.h"
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_addr_t paddr, align_paddr;
+ void *vaddr, *align_vaddr;
+ u32 real_size = size;
+
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ if (align_paddr == paddr) {
+ align_vaddr = vaddr;
+ goto out;
+ }
+
+ dma_free_coherent(dev, real_size, vaddr, paddr);
+
+ /* Reallocate with extra room so the address can be aligned up */
+ real_size = size + align;
+ vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag);
+ if (!vaddr)
+ return -ENOMEM;
+
+ align_paddr = ALIGN(paddr, align);
+ align_vaddr = vaddr + (align_paddr - paddr);
+
+out:
+ mem_align->real_size = real_size;
+ mem_align->ori_vaddr = vaddr;
+ mem_align->ori_paddr = paddr;
+ mem_align->align_vaddr = align_vaddr;
+ mem_align->align_paddr = align_paddr;
+
+ return 0;
+}
+
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align)
+{
+ dma_free_coherent(dev, mem_align->real_size,
+ mem_align->ori_vaddr, mem_align->ori_paddr);
+}
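
A minimal usage sketch of the allocator pair above; the SZ_2K size (from
<linux/sizes.h>), the GFP flag, and the example_alloc_ctx() name are
illustrative assumptions, not part of the patch:

	static int example_alloc_ctx(struct device *dev)
	{
		struct hinic3_dma_addr_align ctx_mem;
		int err;

		/* 2 KiB of coherent memory on a 4 KiB boundary */
		err = hinic3_dma_zalloc_coherent_align(dev, SZ_2K,
						       HINIC3_MIN_PAGE_SIZE,
						       GFP_KERNEL, &ctx_mem);
		if (err)
			return err;

		/* the device is handed ctx_mem.align_paddr; CPU accesses
		 * go through ctx_mem.align_vaddr
		 */
		memset(ctx_mem.align_vaddr, 0, SZ_2K);

		hinic3_dma_free_coherent_align(dev, &ctx_mem);
		return 0;
	}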
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
new file mode 100644
index 000000000000..bb795dace04c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_COMMON_H_
+#define _HINIC3_COMMON_H_
+
+#include <linux/device.h>
+
+#define HINIC3_MIN_PAGE_SIZE 0x1000
+
+struct hinic3_dma_addr_align {
+ u32 real_size;
+
+ void *ori_vaddr;
+ dma_addr_t ori_paddr;
+
+ void *align_vaddr;
+ dma_addr_t align_paddr;
+};
+
+int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
+ gfp_t flag,
+ struct hinic3_dma_addr_align *mem_align);
+void hinic3_dma_free_coherent_align(struct device *dev,
+ struct hinic3_dma_addr_align *mem_align);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
new file mode 100644
index 000000000000..87d9450c30ca
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.supp_svcs_bitmap &
+ BIT(HINIC3_SERVICE_T_NIC);
+}
+
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs;
+}
+
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->cfg_mgmt->cap.port_id;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
new file mode 100644
index 000000000000..e017b1ae9f05
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_CFG_H_
+#define _HINIC3_HW_CFG_H_
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+struct hinic3_hwdev;
+
+struct hinic3_irq {
+ u32 irq_id;
+ u16 msix_entry_idx;
+ bool allocated;
+};
+
+struct hinic3_irq_info {
+ struct hinic3_irq *irq;
+ u16 num_irq;
+ /* device max irq number */
+ u16 num_irq_hw;
+ /* protect irq alloc and free */
+ struct mutex irq_mutex;
+};
+
+struct hinic3_nic_service_cap {
+ u16 max_sqs;
+};
+
+/* Device capabilities */
+struct hinic3_dev_cap {
+ /* Bitmask of services supported by the device */
+ u16 supp_svcs_bitmap;
+ /* Physical port */
+ u8 port_id;
+ struct hinic3_nic_service_cap nic_svc_cap;
+};
+
+struct hinic3_cfg_mgmt_info {
+ struct hinic3_irq_info irq_info;
+ struct hinic3_dev_cap cap;
+};
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num);
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id);
+
+bool hinic3_support_nic(struct hinic3_hwdev *hwdev);
+u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev);
+u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
new file mode 100644
index 000000000000..434696ce7dc2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
+{
+ struct comm_cmd_func_reset func_reset = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ func_reset.func_id = func_id;
+ func_reset.reset_flag = reset_flag;
+
+ mgmt_msg_params_init_default(&msg_params, &func_reset,
+ sizeof(func_reset));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FUNC_RESET, &msg_params);
+ if (err || func_reset.head.status) {
+ dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n",
+ reset_flag, err, func_reset.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
new file mode 100644
index 000000000000..c33a1c77da9c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_COMM_H_
+#define _HINIC3_HW_COMM_H_
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_hwdev;
+
+int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
new file mode 100644
index 000000000000..22c84093efa2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HW_INTF_H_
+#define _HINIC3_HW_INTF_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#define MGMT_MSG_CMD_OP_SET 1
+#define MGMT_MSG_CMD_OP_GET 0
+
+#define MGMT_STATUS_PF_SET_VF_ALREADY 0x4
+#define MGMT_STATUS_EXIST 0x6
+#define MGMT_STATUS_CMD_UNSUPPORTED 0xFF
+
+#define MGMT_MSG_POLLING_TIMEOUT 0
+
+struct mgmt_msg_head {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+};
+
+struct mgmt_msg_params {
+ const void *buf_in;
+ u32 in_size;
+ void *buf_out;
+ u32 expected_out_size;
+ u32 timeout_ms;
+};
+
+/* CMDQ MODULE_TYPE */
+enum mgmt_mod_type {
+ /* HW communication module */
+ MGMT_MOD_COMM = 0,
+ /* L2NIC module */
+ MGMT_MOD_L2NIC = 1,
+ /* Configuration module */
+ MGMT_MOD_CFGM = 7,
+ MGMT_MOD_HILINK = 14,
+};
+
+static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params,
+ void *inout_buf, u32 buf_size)
+{
+ msg_params->buf_in = inout_buf;
+ msg_params->buf_out = inout_buf;
+ msg_params->in_size = buf_size;
+ msg_params->expected_out_size = buf_size;
+ msg_params->timeout_ms = 0;
+}
+
+/* COMM commands between the driver and firmware */
+enum comm_cmd {
+ /* Commands for clearing FLR and resources */
+ COMM_CMD_FUNC_RESET = 0,
+ COMM_CMD_FEATURE_NEGO = 1,
+ COMM_CMD_FLUSH_DOORBELL = 2,
+ COMM_CMD_START_FLUSH = 3,
+ COMM_CMD_GET_GLOBAL_ATTR = 5,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE = 7,
+
+ /* Driver Configuration Commands */
+ COMM_CMD_SET_CMDQ_CTXT = 20,
+ COMM_CMD_SET_VAT = 21,
+ COMM_CMD_CFG_PAGESIZE = 22,
+ COMM_CMD_CFG_MSIX_CTRL_REG = 23,
+ COMM_CMD_SET_CEQ_CTRL_REG = 24,
+ COMM_CMD_SET_DMA_ATTR = 25,
+};
+
+enum comm_func_reset_bits {
+ COMM_FUNC_RESET_BIT_FLUSH = BIT(0),
+ COMM_FUNC_RESET_BIT_MQM = BIT(1),
+ COMM_FUNC_RESET_BIT_SMF = BIT(2),
+ COMM_FUNC_RESET_BIT_PF_BW_CFG = BIT(3),
+
+ COMM_FUNC_RESET_BIT_COMM = BIT(10),
+ /* Clear mbox and aeq; COMM_FUNC_RESET_BIT_COMM must also be set */
+ COMM_FUNC_RESET_BIT_COMM_MGMT_CH = BIT(11),
+ /* Clear cmdq and ceq; COMM_FUNC_RESET_BIT_COMM must also be set */
+ COMM_FUNC_RESET_BIT_COMM_CMD_CH = BIT(12),
+ COMM_FUNC_RESET_BIT_NIC = BIT(13),
+};
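
As the comments above note, the two communication-channel bits are only
valid together with COMM_FUNC_RESET_BIT_COMM. A hedged sketch of a
full-teardown mask built under that rule (this series itself only passes
COMM_FUNC_RESET_BIT_NIC; the helper name is hypothetical):

	static int example_full_reset(struct hinic3_hwdev *hwdev)
	{
		/* the MGMT_CH and CMD_CH bits require the COMM bit */
		u64 reset_flag = COMM_FUNC_RESET_BIT_COMM |
				 COMM_FUNC_RESET_BIT_COMM_MGMT_CH |
				 COMM_FUNC_RESET_BIT_COMM_CMD_CH |
				 COMM_FUNC_RESET_BIT_NIC;

		return hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev),
					 reset_flag);
	}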
+
+struct comm_cmd_func_reset {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+ u64 reset_flag;
+};
+
+#define COMM_MAX_FEATURE_QWORD 4
+struct comm_cmd_feature_nego {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[COMM_MAX_FEATURE_QWORD];
+};
+
+/* Services supported by HW. HW uses these values when delivering events.
+ * HW supports multiple services that are not yet supported by the driver
+ * (e.g. RoCE).
+ */
+enum hinic3_service_type {
+ HINIC3_SERVICE_T_NIC = 0,
+ /* MAX is only used by SW for array sizes. */
+ HINIC3_SERVICE_T_MAX = 1,
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
new file mode 100644
index 000000000000..6e8788a64925
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+int hinic3_init_hwdev(struct pci_dev *pdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
new file mode 100644
index 000000000000..62e2745e9316
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWDEV_H_
+#define _HINIC3_HWDEV_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+
+#include "hinic3_hw_intf.h"
+
+struct hinic3_cmdqs;
+struct hinic3_hwif;
+
+enum hinic3_event_service_type {
+ HINIC3_EVENT_SRV_COMM = 0,
+ HINIC3_EVENT_SRV_NIC = 1
+};
+
+#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type))
+
+/* driver-specific data of pci_dev */
+struct hinic3_pcidev {
+ struct pci_dev *pdev;
+ struct hinic3_hwdev *hwdev;
+ /* Auxiliary devices */
+ struct hinic3_adev *hadev[HINIC3_SERVICE_T_MAX];
+
+ void __iomem *cfg_reg_base;
+ void __iomem *intr_reg_base;
+ void __iomem *db_base;
+ u64 db_dwqe_len;
+ u64 db_base_phy;
+
+ /* lock for attach/detach uld */
+ struct mutex pdev_mutex;
+ unsigned long state;
+};
+
+struct hinic3_hwdev {
+ struct hinic3_pcidev *adapter;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int dev_id;
+ struct hinic3_hwif *hwif;
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ struct hinic3_aeqs *aeqs;
+ struct hinic3_ceqs *ceqs;
+ struct hinic3_mbox *mbox;
+ struct hinic3_cmdqs *cmdqs;
+ struct workqueue_struct *workq;
+ /* protect channel init and uninit */
+ spinlock_t channel_lock;
+ u64 features[COMM_MAX_FEATURE_QWORD];
+ u32 wq_page_size;
+ u8 max_cmdq;
+ ulong func_state;
+};
+
+struct hinic3_event_info {
+ /* enum hinic3_event_service_type */
+ u16 service;
+ u16 type;
+ u8 event_data[104];
+};
+
+struct hinic3_adev {
+ struct auxiliary_device adev;
+ struct hinic3_hwdev *hwdev;
+ enum hinic3_service_type svc_type;
+
+ void (*event)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event);
+};
+
+int hinic3_init_hwdev(struct pci_dev *pdev);
+void hinic3_free_hwdev(struct hinic3_hwdev *hwdev);
+
+void hinic3_set_api_stop(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
new file mode 100644
index 000000000000..0865453bf0e7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag)
+{
+ /* Completed by later submission due to LoC limit. */
+}
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
+{
+ return hwdev->hwif->attr.func_global_idx;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
new file mode 100644
index 000000000000..513c9680e6b6
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_HWIF_H_
+#define _HINIC3_HWIF_H_
+
+#include <linux/build_bug.h>
+#include <linux/spinlock_types.h>
+
+struct hinic3_hwdev;
+
+enum hinic3_func_type {
+ HINIC3_FUNC_TYPE_VF = 1,
+};
+
+struct hinic3_db_area {
+ unsigned long *db_bitmap_array;
+ u32 db_max_areas;
+ /* protect doorbell area alloc and free */
+ spinlock_t idx_lock;
+};
+
+struct hinic3_func_attr {
+ enum hinic3_func_type func_type;
+ u16 func_global_idx;
+ u16 global_vf_id_of_pf;
+ u16 num_irqs;
+ u16 num_sq;
+ u8 port_to_port_idx;
+ u8 pci_intf_idx;
+ u8 ppf_idx;
+ u8 num_aeqs;
+ u8 num_ceqs;
+ u8 msix_flex_en;
+};
+
+static_assert(sizeof(struct hinic3_func_attr) == 20);
+
+struct hinic3_hwif {
+ u8 __iomem *cfg_regs_base;
+ u64 db_base_phy;
+ u64 db_dwqe_len;
+ u8 __iomem *db_base;
+ struct hinic3_db_area db_area;
+ struct hinic3_func_attr attr;
+};
+
+enum hinic3_msix_state {
+ HINIC3_MSIX_ENABLE,
+ HINIC3_MSIX_DISABLE,
+};
+
+void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_state flag);
+
+u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
new file mode 100644
index 000000000000..8b92eed25edf
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+static int hinic3_poll(struct napi_struct *napi, int budget)
+{
+ struct hinic3_irq_cfg *irq_cfg =
+ container_of(napi, struct hinic3_irq_cfg, napi);
+ struct hinic3_nic_dev *nic_dev;
+ bool busy = false;
+ int work_done;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
+
+ busy |= hinic3_tx_poll(irq_cfg->txq, budget);
+
+ if (unlikely(!budget))
+ return 0;
+
+ work_done = hinic3_rx_poll(irq_cfg->rxq, budget);
+ busy |= work_done >= budget;
+
+ if (busy)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return work_done;
+}
+
+void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
+ netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
+ napi_enable(&irq_cfg->napi);
+}
+
+void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
+{
+ napi_disable(&irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
+ netif_napi_del(&irq_cfg->napi);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
new file mode 100644
index 000000000000..4827326e6a59
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_lld.h"
+#include "hinic3_mgmt.h"
+
+#define HINIC3_VF_PCI_CFG_REG_BAR 0
+#define HINIC3_PCI_INTR_REG_BAR 2
+#define HINIC3_PCI_DB_BAR 4
+
+#define HINIC3_EVENT_POLL_SLEEP_US 1000
+#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000
+
+static struct hinic3_adev_device {
+ const char *name;
+} hinic3_adev_devices[HINIC3_SERVICE_T_MAX] = {
+ [HINIC3_SERVICE_T_NIC] = {
+ .name = "nic",
+ },
+};
+
+static bool hinic3_adev_svc_supported(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ switch (svc_type) {
+ case HINIC3_SERVICE_T_NIC:
+ return hinic3_support_nic(hwdev);
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void hinic3_comm_adev_release(struct device *dev)
+{
+ struct hinic3_adev *hadev = container_of(dev, struct hinic3_adev,
+ adev.dev);
+
+ kfree(hadev);
+}
+
+static struct hinic3_adev *hinic3_add_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_adev *hadev;
+ const char *svc_name;
+ int ret;
+
+ hadev = kzalloc(sizeof(*hadev), GFP_KERNEL);
+ if (!hadev)
+ return NULL;
+
+ svc_name = hinic3_adev_devices[svc_type].name;
+ hadev->adev.name = svc_name;
+ hadev->adev.id = hwdev->dev_id;
+ hadev->adev.dev.parent = hwdev->dev;
+ hadev->adev.dev.release = hinic3_comm_adev_release;
+ hadev->svc_type = svc_type;
+ hadev->hwdev = hwdev;
+
+ ret = auxiliary_device_init(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed init adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ kfree(hadev);
+ return NULL;
+ }
+
+ ret = auxiliary_device_add(&hadev->adev);
+ if (ret) {
+ dev_err(hwdev->dev, "failed to add adev %s %u\n",
+ svc_name, hwdev->dev_id);
+ auxiliary_device_uninit(&hadev->adev);
+ return NULL;
+ }
+
+ return hadev;
+}
+
+static void hinic3_del_one_adev(struct hinic3_hwdev *hwdev,
+ enum hinic3_service_type svc_type)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_adev *hadev;
+ int timeout;
+ bool state;
+
+ timeout = read_poll_timeout(test_and_set_bit, state, !state,
+ HINIC3_EVENT_POLL_SLEEP_US,
+ HINIC3_EVENT_POLL_TIMEOUT_US,
+ false, svc_type, &pci_adapter->state);
+
+ hadev = pci_adapter->hadev[svc_type];
+ auxiliary_device_delete(&hadev->adev);
+ auxiliary_device_uninit(&hadev->adev);
+ pci_adapter->hadev[svc_type] = NULL;
+ if (!timeout)
+ clear_bit(svc_type, &pci_adapter->state);
+}
+
+static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ enum hinic3_service_type svc_type;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+
+ for (svc_type = 0; svc_type < HINIC3_SERVICE_T_MAX; svc_type++) {
+ if (!hinic3_adev_svc_supported(hwdev, svc_type))
+ continue;
+
+ pci_adapter->hadev[svc_type] = hinic3_add_one_adev(hwdev,
+ svc_type);
+ if (!pci_adapter->hadev[svc_type])
+ goto err_del_adevs;
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+ return 0;
+
+err_del_adevs:
+ while (svc_type > 0) {
+ svc_type--;
+ if (pci_adapter->hadev[svc_type]) {
+ hinic3_del_one_adev(hwdev, svc_type);
+ pci_adapter->hadev[svc_type] = NULL;
+ }
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+ return -ENOMEM;
+}
+
+static void hinic3_detach_aux_devices(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ int i;
+
+ mutex_lock(&pci_adapter->pdev_mutex);
+ for (i = 0; i < ARRAY_SIZE(hinic3_adev_devices); i++) {
+ if (pci_adapter->hadev[i])
+ hinic3_del_one_adev(hwdev, i);
+ }
+ mutex_unlock(&pci_adapter->pdev_mutex);
+}
+
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ return hadev->hwdev;
+}
+
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event))
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = event_handler;
+}
+
+void hinic3_adev_event_unregister(struct auxiliary_device *adev)
+{
+ struct hinic3_adev *hadev;
+
+ hadev = container_of(adev, struct hinic3_adev, adev);
+ hadev->event = NULL;
+}
+
+static int hinic3_mapping_bar(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_VF_PCI_CFG_REG_BAR);
+ if (!pci_adapter->cfg_reg_base) {
+ dev_err(&pdev->dev, "Failed to map configuration regs\n");
+ return -ENOMEM;
+ }
+
+ pci_adapter->intr_reg_base = pci_ioremap_bar(pdev,
+ HINIC3_PCI_INTR_REG_BAR);
+ if (!pci_adapter->intr_reg_base) {
+ dev_err(&pdev->dev, "Failed to map interrupt regs\n");
+ goto err_unmap_cfg_reg_base;
+ }
+
+ pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR);
+ pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR);
+ if (!pci_adapter->db_base) {
+ dev_err(&pdev->dev, "Failed to map doorbell regs\n");
+ goto err_unmap_intr_reg_base;
+ }
+
+ return 0;
+
+err_unmap_intr_reg_base:
+ iounmap(pci_adapter->intr_reg_base);
+
+err_unmap_cfg_reg_base:
+ iounmap(pci_adapter->cfg_reg_base);
+
+ return -ENOMEM;
+}
+
+static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter)
+{
+ iounmap(pci_adapter->db_base);
+ iounmap(pci_adapter->intr_reg_base);
+ iounmap(pci_adapter->cfg_reg_base);
+}
+
+static int hinic3_pci_init(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL);
+ if (!pci_adapter)
+ return -ENOMEM;
+
+ pci_adapter->pdev = pdev;
+ mutex_init(&pci_adapter->pdev_mutex);
+
+ pci_set_drvdata(pdev, pci_adapter);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ goto err_free_pci_adapter;
+ }
+
+ err = pci_request_regions(pdev, HINIC3_NIC_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request regions\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+
+err_disable_device:
+ pci_disable_device(pdev);
+
+err_free_pci_adapter:
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+
+ return err;
+}
+
+static void hinic3_pci_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ mutex_destroy(&pci_adapter->pdev_mutex);
+ kfree(pci_adapter);
+}
+
+static int hinic3_func_init(struct pci_dev *pdev,
+ struct hinic3_pcidev *pci_adapter)
+{
+ int err;
+
+ err = hinic3_init_hwdev(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize hardware device\n");
+ return err;
+ }
+
+ err = hinic3_attach_aux_devices(pci_adapter->hwdev);
+ if (err)
+ goto err_free_hwdev;
+
+ return 0;
+
+err_free_hwdev:
+ hinic3_free_hwdev(pci_adapter->hwdev);
+
+ return err;
+}
+
+static void hinic3_func_uninit(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_detach_aux_devices(pci_adapter->hwdev);
+ hinic3_free_hwdev(pci_adapter->hwdev);
+}
+
+static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+ int err;
+
+ err = hinic3_mapping_bar(pdev, pci_adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map bar\n");
+ goto err_out;
+ }
+
+ err = hinic3_func_init(pdev, pci_adapter);
+ if (err)
+ goto err_unmap_bar;
+
+ return 0;
+
+err_unmap_bar:
+ hinic3_unmapping_bar(pci_adapter);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe function failed\n");
+ return err;
+}
+
+static void hinic3_remove_func(struct hinic3_pcidev *pci_adapter)
+{
+ struct pci_dev *pdev = pci_adapter->pdev;
+
+ hinic3_func_uninit(pdev);
+ hinic3_unmapping_bar(pci_adapter);
+}
+
+static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hinic3_pcidev *pci_adapter;
+ int err;
+
+ err = hinic3_pci_init(pdev);
+ if (err)
+ goto err_out;
+
+ pci_adapter = pci_get_drvdata(pdev);
+ err = hinic3_probe_func(pci_adapter);
+ if (err)
+ goto err_uninit_pci;
+
+ return 0;
+
+err_uninit_pci:
+ hinic3_pci_uninit(pdev);
+
+err_out:
+ dev_err(&pdev->dev, "PCIe device probe failed\n");
+ return err;
+}
+
+static void hinic3_remove(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ hinic3_remove_func(pci_adapter);
+ hinic3_pci_uninit(pdev);
+}
+
+static const struct pci_device_id hinic3_pci_table[] = {
+ /* Completed by later submission due to LoC limit. */
+ {0, 0}
+
+};
+
+MODULE_DEVICE_TABLE(pci, hinic3_pci_table);
+
+static void hinic3_shutdown(struct pci_dev *pdev)
+{
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+
+ pci_disable_device(pdev);
+
+ if (pci_adapter)
+ hinic3_set_api_stop(pci_adapter->hwdev);
+}
+
+static struct pci_driver hinic3_driver = {
+ .name = HINIC3_NIC_DRV_NAME,
+ .id_table = hinic3_pci_table,
+ .probe = hinic3_probe,
+ .remove = hinic3_remove,
+ .shutdown = hinic3_shutdown,
+ .sriov_configure = pci_sriov_configure_simple
+};
+
+int hinic3_lld_init(void)
+{
+ return pci_register_driver(&hinic3_driver);
+}
+
+void hinic3_lld_exit(void)
+{
+ pci_unregister_driver(&hinic3_driver);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
new file mode 100644
index 000000000000..322b44803476
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_LLD_H_
+#define _HINIC3_LLD_H_
+
+#include <linux/auxiliary_bus.h>
+
+struct hinic3_event_info;
+
+#define HINIC3_NIC_DRV_NAME "hinic3"
+
+int hinic3_lld_init(void);
+void hinic3_lld_exit(void);
+void hinic3_adev_event_register(struct auxiliary_device *adev,
+ void (*event_handler)(struct auxiliary_device *adev,
+ struct hinic3_event_info *event));
+void hinic3_adev_event_unregister(struct auxiliary_device *adev);
+struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
new file mode 100644
index 000000000000..093aa6d775ff
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hw_comm.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_lld.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver"
+
+#define HINIC3_RX_BUF_LEN 2048
+#define HINIC3_LRO_REPLENISH_THLD 256
+#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
+
+#define HINIC3_SQ_DEPTH 1024
+#define HINIC3_RQ_DEPTH 1024
+
+static int hinic3_alloc_txrxqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_alloc_txqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc txqs\n");
+ return err;
+ }
+
+ err = hinic3_alloc_rxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc rxqs\n");
+ goto err_free_txqs;
+ }
+
+ return 0;
+
+err_free_txqs:
+ hinic3_free_txqs(netdev);
+
+ return err;
+}
+
+static void hinic3_free_txrxqs(struct net_device *netdev)
+{
+ hinic3_free_rxqs(netdev);
+ hinic3_free_txqs(netdev);
+}
+
+static int hinic3_init_nic_dev(struct net_device *netdev,
+ struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = hwdev->pdev;
+
+ nic_dev->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ nic_dev->hwdev = hwdev;
+ nic_dev->pdev = pdev;
+
+ nic_dev->rx_buf_len = HINIC3_RX_BUF_LEN;
+ nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD;
+ nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap;
+
+ return 0;
+}
+
+static int hinic3_sw_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
+ nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+
+ /* The VF driver always uses a random MAC address. During VM migration
+ * to a new device, the new device should learn the VM's old MAC rather
+ * than provide its own. The product design assumes that every VF is
+ * susceptible to migration, so the device avoids offering MAC addresses
+ * to VFs.
+ */
+ eth_hw_addr_random(netdev);
+ err = hinic3_set_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set default MAC\n");
+ return err;
+ }
+
+ err = hinic3_alloc_txrxqs(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc qps\n");
+ goto err_del_mac;
+ }
+
+ return 0;
+
+err_del_mac:
+ hinic3_del_mac(hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(hwdev));
+
+ return err;
+}
+
+static void hinic3_sw_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxqs(netdev);
+ hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+}
+
+static void hinic3_assign_netdev_ops(struct net_device *netdev)
+{
+ hinic3_set_netdev_ops(netdev);
+}
+
+static void netdev_feature_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ netdev_features_t cso_fts = 0;
+ netdev_features_t tso_fts = 0;
+ netdev_features_t dft_fts;
+
+ dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_CSUM))
+ cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_SCTP_CRC))
+ cso_fts |= NETIF_F_SCTP_CRC;
+ if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO))
+ tso_fts |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ netdev->features |= dft_fts | cso_fts | tso_fts;
+}
+
+static int hinic3_set_default_hw_feature(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ err = hinic3_set_nic_feature_to_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set nic features\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_link_status_change(struct net_device *netdev,
+ bool link_status_up)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (link_status_up) {
+ if (netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = true;
+ netif_carrier_on(netdev);
+ netdev_dbg(netdev, "Link is up\n");
+ } else {
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ nic_dev->link_status_up = false;
+ netif_carrier_off(netdev);
+ netdev_dbg(netdev, "Link is down\n");
+ }
+}
+
+static void hinic3_nic_event(struct auxiliary_device *adev,
+ struct hinic3_event_info *event)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ netdev = nic_dev->netdev;
+
+ switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) {
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_UP):
+ hinic3_link_status_change(netdev, true);
+ break;
+ case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC,
+ HINIC3_NIC_EVENT_LINK_DOWN):
+ hinic3_link_status_change(netdev, false);
+ break;
+ default:
+ break;
+ }
+}
+
+static int hinic3_nic_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct hinic3_hwdev *hwdev = hinic3_adev_get_hwdev(adev);
+ struct pci_dev *pdev = hwdev->pdev;
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ u16 max_qps, glb_func_id;
+ int err;
+
+ if (!hinic3_support_nic(hwdev)) {
+ dev_dbg(&adev->dev, "HW doesn't support nic\n");
+ return 0;
+ }
+
+ hinic3_adev_event_register(adev, hinic3_nic_event);
+
+ glb_func_id = hinic3_global_func_id(hwdev);
+ err = hinic3_func_reset(hwdev, glb_func_id, COMM_FUNC_RESET_BIT_NIC);
+ if (err) {
+ dev_err(&adev->dev, "Failed to reset function\n");
+ goto err_unregister_adev_event;
+ }
+
+ max_qps = hinic3_func_max_qnum(hwdev);
+ netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps);
+ if (!netdev) {
+ dev_err(&adev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_unregister_adev_event;
+ }
+
+ nic_dev = netdev_priv(netdev);
+ dev_set_drvdata(&adev->dev, nic_dev);
+ err = hinic3_init_nic_dev(netdev, hwdev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_init_nic_io(nic_dev);
+ if (err)
+ goto err_free_netdev;
+
+ err = hinic3_sw_init(netdev);
+ if (err)
+ goto err_free_nic_io;
+
+ hinic3_assign_netdev_ops(netdev);
+
+ netdev_feature_init(netdev);
+ err = hinic3_set_default_hw_feature(netdev);
+ if (err)
+ goto err_uninit_sw;
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_uninit_nic_feature;
+
+ return 0;
+
+err_uninit_nic_feature:
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+
+err_uninit_sw:
+ hinic3_sw_uninit(netdev);
+
+err_free_nic_io:
+ hinic3_free_nic_io(nic_dev);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+err_unregister_adev_event:
+ hinic3_adev_event_unregister(adev);
+ dev_err(&pdev->dev, "NIC service probe failed\n");
+
+ return err;
+}
+
+static void hinic3_nic_remove(struct auxiliary_device *adev)
+{
+ struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev);
+ struct net_device *netdev;
+
+ if (!nic_dev || !hinic3_support_nic(nic_dev->hwdev))
+ return;
+
+ netdev = nic_dev->netdev;
+ unregister_netdev(netdev);
+
+ hinic3_update_nic_feature(nic_dev, 0);
+ hinic3_set_nic_feature_to_hw(nic_dev);
+ hinic3_sw_uninit(netdev);
+
+ hinic3_free_nic_io(nic_dev);
+
+ free_netdev(netdev);
+}
+
+static const struct auxiliary_device_id hinic3_nic_id_table[] = {
+ {
+ .name = HINIC3_NIC_DRV_NAME ".nic",
+ },
+ {}
+};
+
+static struct auxiliary_driver hinic3_nic_driver = {
+ .probe = hinic3_nic_probe,
+ .remove = hinic3_nic_remove,
+ .suspend = NULL,
+ .resume = NULL,
+ .name = "nic",
+ .id_table = hinic3_nic_id_table,
+};
+
+static __init int hinic3_nic_lld_init(void)
+{
+ int err;
+
+ pr_info("%s: %s\n", HINIC3_NIC_DRV_NAME, HINIC3_NIC_DRV_DESC);
+
+ err = hinic3_lld_init();
+ if (err)
+ return err;
+
+ err = auxiliary_driver_register(&hinic3_nic_driver);
+ if (err) {
+ hinic3_lld_exit();
+ return err;
+ }
+
+ return 0;
+}
+
+static __exit void hinic3_nic_lld_exit(void)
+{
+ auxiliary_driver_unregister(&hinic3_nic_driver);
+
+ hinic3_lld_exit();
+}
+
+module_init(hinic3_nic_lld_init);
+module_exit(hinic3_nic_lld_exit);
+
+MODULE_AUTHOR("Huawei Technologies Co., Ltd");
+MODULE_DESCRIPTION(HINIC3_NIC_DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
new file mode 100644
index 000000000000..e74d1eb09730
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_common.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
new file mode 100644
index 000000000000..d7a6c37b7eff
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MBOX_H_
+#define _HINIC3_MBOX_H_
+
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+
+struct hinic3_hwdev;
+
+int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
new file mode 100644
index 000000000000..4edabeb32112
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_H_
+#define _HINIC3_MGMT_H_
+
+#include <linux/types.h>
+
+struct hinic3_hwdev;
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
new file mode 100644
index 000000000000..c4434efdc7f7
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_MGMT_INTERFACE_H_
+#define _HINIC3_MGMT_INTERFACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/if_ether.h>
+
+#include "hinic3_hw_intf.h"
+
+struct l2nic_cmd_feature_nego {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd;
+ u64 s_feature[4];
+};
+
+enum l2nic_func_tbl_cfg_bitmap {
+ L2NIC_FUNC_TBL_CFG_INIT = 0,
+ L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE = 1,
+ L2NIC_FUNC_TBL_CFG_MTU = 2,
+};
+
+struct l2nic_func_tbl_cfg {
+ u16 rx_wqe_buf_size;
+ u16 mtu;
+ u32 rsvd[9];
+};
+
+struct l2nic_cmd_set_func_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd;
+ u32 cfg_bitmap;
+ struct l2nic_func_tbl_cfg tbl_cfg;
+};
+
+struct l2nic_cmd_set_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_update_mac {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 vlan_id;
+ u16 rsvd1;
+ u8 old_mac[ETH_ALEN];
+ u16 rsvd2;
+ u8 new_mac[ETH_ALEN];
+};
+
+struct l2nic_cmd_force_pkt_drop {
+ struct mgmt_msg_head msg_head;
+ u8 port;
+ u8 rsvd1[3];
+};
+
+/* Commands between the NIC driver and firmware */
+enum l2nic_cmd {
+ /* FUNC CFG */
+ L2NIC_CMD_SET_FUNC_TBL = 5,
+ L2NIC_CMD_SET_VPORT_ENABLE = 6,
+ L2NIC_CMD_SET_SQ_CI_ATTR = 8,
+ L2NIC_CMD_CLEAR_QP_RESOURCE = 11,
+ L2NIC_CMD_FEATURE_NEGO = 15,
+ L2NIC_CMD_SET_MAC = 21,
+ L2NIC_CMD_DEL_MAC = 22,
+ L2NIC_CMD_UPDATE_MAC = 23,
+ L2NIC_CMD_CFG_RSS = 60,
+ L2NIC_CMD_CFG_RSS_HASH_KEY = 63,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64,
+ L2NIC_CMD_SET_RSS_CTX_TBL = 65,
+ L2NIC_CMD_QOS_DCB_STATE = 110,
+ L2NIC_CMD_FORCE_PKT_DROP = 113,
+ L2NIC_CMD_MAX = 256,
+};
+
+enum hinic3_nic_feature_cap {
+ HINIC3_NIC_F_CSUM = BIT(0),
+ HINIC3_NIC_F_SCTP_CRC = BIT(1),
+ HINIC3_NIC_F_TSO = BIT(2),
+ HINIC3_NIC_F_LRO = BIT(3),
+ HINIC3_NIC_F_UFO = BIT(4),
+ HINIC3_NIC_F_RSS = BIT(5),
+ HINIC3_NIC_F_RX_VLAN_FILTER = BIT(6),
+ HINIC3_NIC_F_RX_VLAN_STRIP = BIT(7),
+ HINIC3_NIC_F_TX_VLAN_INSERT = BIT(8),
+ HINIC3_NIC_F_VXLAN_OFFLOAD = BIT(9),
+ HINIC3_NIC_F_FDIR = BIT(11),
+ HINIC3_NIC_F_PROMISC = BIT(12),
+ HINIC3_NIC_F_ALLMULTI = BIT(13),
+ HINIC3_NIC_F_RATE_LIMIT = BIT(16),
+};
+
+#define HINIC3_NIC_F_ALL_MASK 0x33bff
+#define HINIC3_NIC_DRV_DEFAULT_FEATURE 0x3f03f
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
new file mode 100644
index 000000000000..71104a6b8bef
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+#include "hinic3_tx.h"
+
+static int hinic3_open(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+static int hinic3_close(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int err;
+
+ err = hinic3_set_port_mtu(netdev, new_mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to change port mtu to %d\n",
+ new_mtu);
+ return err;
+ }
+
+ netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu);
+ WRITE_ONCE(netdev->mtu, new_mtu);
+
+ return 0;
+}
+
+static int hinic3_set_mac_addr(struct net_device *netdev, void *addr)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int err;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr,
+ saddr->sa_data, 0,
+ hinic3_global_func_id(nic_dev->hwdev));
+
+ if (err)
+ return err;
+
+ eth_hw_addr_set(netdev, saddr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops hinic3_netdev_ops = {
+ .ndo_open = hinic3_open,
+ .ndo_stop = hinic3_close,
+ .ndo_change_mtu = hinic3_change_mtu,
+ .ndo_set_mac_address = hinic3_set_mac_addr,
+ .ndo_start_xmit = hinic3_xmit_frame,
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev)
+{
+ netdev->netdev_ops = &hinic3_netdev_ops;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
new file mode 100644
index 000000000000..5b1a91a18c67
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct l2nic_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate nic feature, err:%d, status: 0x%x\n",
+ err, feature_nego.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64));
+
+ return 0;
+}
+
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits)
+{
+ return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits;
+}
+
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap)
+{
+ nic_dev->nic_io->feature_cap = feature_cap;
+}
+
+static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap,
+ const struct l2nic_func_tbl_cfg *cfg)
+{
+ struct l2nic_cmd_set_func_tbl cmd_func_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmd_func_tbl.func_id = hinic3_global_func_id(hwdev);
+ cmd_func_tbl.cfg_bitmap = cfg_bitmap;
+ cmd_func_tbl.tbl_cfg = *cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &cmd_func_tbl,
+ sizeof(cmd_func_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_FUNC_TBL, &msg_params);
+ if (err || cmd_func_tbl.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x\n",
+ cfg_bitmap, err, cmd_func_tbl.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ func_tbl_cfg.mtu = new_mtu;
+ return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU),
+ &func_tbl_cfg);
+}
+
+static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status,
+ u16 vlan_id)
+{
+ if ((status && status != MGMT_STATUS_EXIST) ||
+ ((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) {
+ dev_warn(hwdev->dev, "PF has already set VF mac, Ignore set operation\n");
+ return 0;
+ }
+
+ if (mac_info.msg_head.status == MGMT_STATUS_EXIST) {
+ dev_warn(hwdev->dev, "MAC is repeated. Ignore update operation\n");
+ return 0;
+ }
+
+ return 0;
+}
+
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id)
+{
+ struct l2nic_cmd_set_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.mac, mac_addr);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_DEL_MAC, &msg_params);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to delete MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return err;
+ }
+
+ return 0;
+}
+
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id)
+{
+ struct l2nic_cmd_update_mac mac_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) {
+ dev_err(hwdev->dev, "Invalid VLAN number: %d\n",
+ (vlan_id & HINIC3_VLAN_ID_MASK));
+ return -EINVAL;
+ }
+
+ mac_info.func_id = func_id;
+ mac_info.vlan_id = vlan_id;
+ ether_addr_copy(mac_info.old_mac, old_mac);
+ ether_addr_copy(mac_info.new_mac, new_mac);
+
+ mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_UPDATE_MAC, &msg_params);
+ if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status,
+ mac_info.vlan_id)) {
+ dev_err(hwdev->dev,
+ "Failed to update MAC, err: %d, status: 0x%x\n",
+ err, mac_info.msg_head.status);
+ return -EIO;
+ }
+ return 0;
+}
+
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_force_pkt_drop pkt_drop = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ pkt_drop.port = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &pkt_drop, sizeof(pkt_drop));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_FORCE_PKT_DROP, &msg_params);
+ if ((pkt_drop.msg_head.status != MGMT_STATUS_CMD_UNSUPPORTED &&
+ pkt_drop.msg_head.status) || err) {
+ dev_err(hwdev->dev,
+ "Failed to set force tx packets drop, err: %d, status: 0x%x\n",
+ err, pkt_drop.msg_head.status);
+ return -EFAULT;
+ }
+
+ return pkt_drop.msg_head.status;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
new file mode 100644
index 000000000000..bf9ce51dc401
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_CFG_H_
+#define _HINIC3_NIC_CFG_H_
+
+#include <linux/types.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_mgmt_interface.h"
+
+struct hinic3_hwdev;
+struct hinic3_nic_dev;
+
+#define HINIC3_MIN_MTU_SIZE 256
+#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600
+
+#define HINIC3_VLAN_ID_MASK 0x7FFF
+
+enum hinic3_nic_event_type {
+ HINIC3_NIC_EVENT_LINK_DOWN = 0,
+ HINIC3_NIC_EVENT_LINK_UP = 1,
+};
+
+int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
+bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_nic_feature_cap feature_bits);
+void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+
+int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
+
+int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
+ u16 func_id);
+int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
+ u8 *new_mac, u16 vlan_id, u16 func_id);
+
+int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
new file mode 100644
index 000000000000..c994fc9b6ee0
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_DEV_H_
+#define _HINIC3_NIC_DEV_H_
+
+#include <linux/netdevice.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_mgmt_interface.h"
+
+enum hinic3_flags {
+ HINIC3_RSS_ENABLE,
+};
+
+enum hinic3_rss_hash_type {
+ HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0,
+ HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1,
+};
+
+struct hinic3_rss_type {
+ u8 tcp_ipv6_ext;
+ u8 ipv6_ext;
+ u8 tcp_ipv6;
+ u8 ipv6;
+ u8 tcp_ipv4;
+ u8 ipv4;
+ u8 udp_ipv6;
+ u8 udp_ipv4;
+};
+
+struct hinic3_irq_cfg {
+ struct net_device *netdev;
+ u16 msix_entry_idx;
+ /* provided by OS */
+ u32 irq_id;
+ char irq_name[IFNAMSIZ + 16];
+ struct napi_struct napi;
+ cpumask_t affinity_mask;
+ struct hinic3_txq *txq;
+ struct hinic3_rxq *rxq;
+};
+
+struct hinic3_dyna_txrxq_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_dyna_txq_res *txqs_res;
+ struct hinic3_dyna_rxq_res *rxqs_res;
+ struct hinic3_irq_cfg *irq_cfg;
+};
+
+struct hinic3_nic_dev {
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_nic_io *nic_io;
+
+ u16 max_qps;
+ u16 rx_buf_len;
+ u32 lro_replenish_thld;
+ unsigned long flags;
+ struct hinic3_nic_service_cap nic_svc_cap;
+
+ struct hinic3_dyna_txrxq_params q_params;
+ struct hinic3_txq *txqs;
+ struct hinic3_rxq *rxqs;
+
+ u16 num_qp_irq;
+ struct msix_entry *qps_msix_entries;
+
+ bool link_status_up;
+};
+
+void hinic3_set_netdev_ops(struct net_device *netdev);
+
+/* Temporary prototypes. Functions become static in later submission. */
+void qp_add_napi(struct hinic3_irq_cfg *irq_cfg);
+void qp_del_napi(struct hinic3_irq_cfg *irq_cfg);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
new file mode 100644
index 000000000000..34a1f5bd5ac1
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_hw_comm.h"
+#include "hinic3_hw_intf.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
new file mode 100644
index 000000000000..865ba6878c48
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_NIC_IO_H_
+#define _HINIC3_NIC_IO_H_
+
+#include <linux/bitfield.h>
+
+#include "hinic3_wq.h"
+
+struct hinic3_nic_dev;
+
+#define HINIC3_SQ_WQEBB_SHIFT 4
+#define HINIC3_RQ_WQEBB_SHIFT 3
+#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT)
+
+/* ******************** RQ_CTRL ******************** */
+enum hinic3_rq_wqe_type {
+ HINIC3_NORMAL_RQ_WQE = 1,
+};
+
+/* ******************** SQ_CTRL ******************** */
+#define HINIC3_TX_MSS_DEFAULT 0x3E00
+#define HINIC3_TX_MSS_MIN 0x50
+#define HINIC3_MAX_SQ_SGE 18
+
+struct hinic3_io_queue {
+ struct hinic3_wq wq;
+ u8 owner;
+ u16 q_id;
+ u16 msix_entry_idx;
+ u8 __iomem *db_addr;
+ u16 *cons_idx_addr;
+} ____cacheline_aligned;
+
+static inline u16 hinic3_get_sq_local_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->cons_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return wq->prod_idx & wq->idx_mask;
+}
+
+static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq)
+{
+ const struct hinic3_wq *wq = &sq->wq;
+
+ return READ_ONCE(*sq->cons_idx_addr) & wq->idx_mask;
+}
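
The three index helpers above are typically combined to size the remaining
room in a send queue. A sketch of that computation under the common
one-empty-slot convention for power-of-two rings (the helper name is
hypothetical, not part of the patch):

	static inline u16 example_sq_free_wqebbs(const struct hinic3_io_queue *sq)
	{
		u16 pi = hinic3_get_sq_local_pi(sq);
		u16 ci = hinic3_get_sq_hw_ci(sq);

		/* keep one slot unused so a full ring is distinguishable
		 * from an empty one
		 */
		return (ci - pi - 1) & sq->wq.idx_mask;
	}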
+
+/* ******************** DB INFO ******************** */
+#define DB_INFO_QID_MASK GENMASK(12, 0)
+#define DB_INFO_CFLAG_MASK BIT(23)
+#define DB_INFO_COS_MASK GENMASK(26, 24)
+#define DB_INFO_TYPE_MASK GENMASK(31, 27)
+#define DB_INFO_SET(val, member) \
+ FIELD_PREP(DB_INFO_##member##_MASK, val)
+
+#define DB_PI_LOW_MASK 0xFFU
+#define DB_PI_HIGH_MASK 0xFFU
+#define DB_PI_HI_SHIFT 8
+#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK)
+#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK)
+#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi))
+#define DB_SRC_TYPE 1
+
+/* CFLAG_DATA_PATH */
+#define DB_CFLAG_DP_SQ 0
+#define DB_CFLAG_DP_RQ 1
+
+struct hinic3_nic_db {
+ u32 db_info;
+ u32 pi_hi;
+};
+
+static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
+ u8 cflag, u16 pi)
+{
+ struct hinic3_nic_db db;
+
+ db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) |
+ DB_INFO_SET(cflag, CFLAG) |
+ DB_INFO_SET(cos, COS) |
+ DB_INFO_SET(queue->q_id, QID);
+ db.pi_hi = DB_PI_HIGH(pi);
+
+ writeq(*((u64 *)&db), DB_ADDR(queue, pi));
+}
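+
+/* Illustrative example (not part of the driver): ringing an SQ doorbell
+ * with pi = 0x1234 on a queue whose q_id is 5. DB_ADDR() folds the low
+ * byte of the PI into the doorbell offset (0x34 * 8 bytes past db_addr)
+ * while the high byte travels in db.pi_hi:
+ *
+ *   hinic3_write_db(sq, 0, DB_CFLAG_DP_SQ, 0x1234);
+ *   // db.db_info = TYPE=1 | CFLAG=0 | COS=0 | QID=5; db.pi_hi = 0x12
+ */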
+
+struct hinic3_nic_io {
+ struct hinic3_io_queue *sq;
+ struct hinic3_io_queue *rq;
+
+ u16 num_qps;
+ u16 max_qps;
+
+ /* Base address for consumer index of all tx queues. Each queue is
+ * given a full cache line to hold its consumer index. HW updates
+ * current consumer index as it consumes tx WQEs.
+ */
+ void *ci_vaddr_base;
+ dma_addr_t ci_dma_base;
+
+ u8 __iomem *sqs_db_addr;
+ u8 __iomem *rqs_db_addr;
+
+ u16 rx_buf_len;
+ u64 feature_cap;
+};
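+
+/* Illustrative sketch of the CI layout (assumed; the allocation code
+ * lands in a later submission): with one cache line per queue, queue q
+ * would read its HW-updated consumer index at
+ *
+ *   u16 *ci = (u16 *)((u8 *)nic_io->ci_vaddr_base + q * L1_CACHE_BYTES);
+ *
+ * which is the address hinic3_get_sq_hw_ci() dereferences through
+ * sq->cons_idx_addr.
+ */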
+
+int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
new file mode 100644
index 000000000000..fab9011de9ad
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/device.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_queue_common.h"
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size)
+{
+ u32 elem_per_page;
+
+ elem_per_page = min(page_size / elem_size, q_depth);
+
+ qpages->pages = NULL;
+ qpages->page_size = page_size;
+ qpages->num_pages = max(q_depth / elem_per_page, 1);
+ qpages->elem_size_shift = ilog2(elem_size);
+ qpages->elem_per_pg_shift = ilog2(elem_per_page);
+}
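+
+/* Worked example: q_depth = 1024, page_size = 4096, elem_size = 16 gives
+ * elem_per_page = min(4096 / 16, 1024) = 256, num_pages = 4,
+ * elem_size_shift = 4 and elem_per_pg_shift = 8. Elements per page and
+ * page count both end up as powers of 2, which the mask arithmetic in
+ * get_q_element() relies on.
+ */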
+
+static void __queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 pg_cnt)
+{
+ while (pg_cnt > 0) {
+ pg_cnt--;
+ hinic3_dma_free_coherent_align(hwdev->dev,
+ qpages->pages + pg_cnt);
+ }
+ kfree(qpages->pages);
+ qpages->pages = NULL;
+}
+
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages)
+{
+ __queue_pages_free(hwdev, qpages, qpages->num_pages);
+}
+
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align)
+{
+ u32 pg_idx;
+ int err;
+
+ qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]),
+ GFP_KERNEL);
+ if (!qpages->pages)
+ return -ENOMEM;
+
+ if (align == 0)
+ align = qpages->page_size;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ err = hinic3_dma_zalloc_coherent_align(hwdev->dev,
+ qpages->page_size,
+ align,
+ GFP_KERNEL,
+ qpages->pages + pg_idx);
+ if (err) {
+ __queue_pages_free(hwdev, qpages, pg_idx);
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
new file mode 100644
index 000000000000..ec4cae0a0929
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_QUEUE_COMMON_H_
+#define _HINIC3_QUEUE_COMMON_H_
+
+#include <linux/types.h>
+
+#include "hinic3_common.h"
+
+struct hinic3_hwdev;
+
+struct hinic3_queue_pages {
+ /* Array of DMA-able pages that actually holds the queue entries. */
+ struct hinic3_dma_addr_align *pages;
+ /* Page size in bytes. */
+ u32 page_size;
+ /* Number of pages, must be power of 2. */
+ u16 num_pages;
+ u8 elem_size_shift;
+ u8 elem_per_pg_shift;
+};
+
+void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth,
+ u32 page_size, u32 elem_size);
+int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages, u32 align);
+void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev,
+ struct hinic3_queue_pages *qpages);
+
+/* Get pointer to queue entry at the specified index. Index does not have to be
+ * masked to queue depth, only least significant bits will be used. Also
+ * provides remaining elements in same page (including the first one) in case
+ * caller needs multiple entries.
+ */
+static inline void *get_q_element(const struct hinic3_queue_pages *qpages,
+ u32 idx, u32 *remaining_in_page)
+{
+ const struct hinic3_dma_addr_align *page;
+ u32 page_idx, elem_idx, elem_per_pg, ofs;
+ u8 shift;
+
+ shift = qpages->elem_per_pg_shift;
+ page_idx = (idx >> shift) & (qpages->num_pages - 1);
+ elem_per_pg = 1 << shift;
+ elem_idx = idx & (elem_per_pg - 1);
+ if (remaining_in_page)
+ *remaining_in_page = elem_per_pg - elem_idx;
+ ofs = elem_idx << qpages->elem_size_shift;
+ page = qpages->pages + page_idx;
+ return (char *)page->align_vaddr + ofs;
+}
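+
+/* Usage sketch: fetch the element at unmasked index idx and learn how
+ * many contiguous elements remain in the same page:
+ *
+ *   u32 remaining;
+ *   struct hinic3_rq_wqe *wqe;
+ *
+ *   wqe = get_q_element(&rq->wq.qpages, idx, &remaining);
+ *
+ * Pass NULL for remaining_in_page when only the element is needed.
+ */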
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
new file mode 100644
index 000000000000..860163e9d66c
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/netdevice.h>
+#include <net/gro.h>
+#include <net/page_pool/helpers.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_rx.h"
+
+#define HINIC3_RX_HDR_SIZE 256
+#define HINIC3_RX_BUFFER_WRITE 16
+
+#define HINIC3_RX_TCP_PKT 0x3
+#define HINIC3_RX_UDP_PKT 0x4
+#define HINIC3_RX_SCTP_PKT 0x7
+
+#define HINIC3_RX_IPV4_PKT 0
+#define HINIC3_RX_IPV6_PKT 1
+#define HINIC3_RX_INVALID_IP_TYPE 2
+
+#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0
+#define HINIC3_RX_PKT_FORMAT_VXLAN 1
+
+#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66
+#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86
+#define HINIC3_LRO_PKT_HDR_LEN(cqe) \
+ (RQ_CQE_OFFLOAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \
+ HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \
+ HINIC3_LRO_PKT_HDR_LEN_IPV4)
+
+int hinic3_alloc_rxqs(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+ return -EFAULT;
+}
+
+void hinic3_free_rxqs(struct net_device *netdev)
+{
+ /* Completed by later submission due to LoC limit. */
+}
+
+static int rx_alloc_mapped_page(struct page_pool *page_pool,
+ struct hinic3_rx_info *rx_info, u16 buf_len)
+{
+ struct page *page;
+ u32 page_offset;
+
+ page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx_info->page = page;
+ rx_info->page_offset = page_offset;
+
+ return 0;
+}
+
+static void rq_wqe_buf_set(struct hinic3_io_queue *rq, u32 wqe_idx,
+ dma_addr_t dma_addr, u16 len)
+{
+ struct hinic3_rq_wqe *rq_wqe;
+
+ rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
+ rq_wqe->buf_hi_addr = upper_32_bits(dma_addr);
+ rq_wqe->buf_lo_addr = lower_32_bits(dma_addr);
+}
+
+static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
+{
+ u32 i, free_wqebbs = rxq->delta - 1;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t dma_addr;
+ int err;
+
+ for (i = 0; i < free_wqebbs; i++) {
+ rx_info = &rxq->rx_info[rxq->next_to_update];
+
+ err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
+ rxq->buf_len);
+ if (unlikely(err))
+ break;
+
+ dma_addr = page_pool_get_dma_addr(rx_info->page) +
+ rx_info->page_offset;
+ rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
+ rxq->buf_len);
+ rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask;
+ }
+
+ if (likely(i)) {
+ hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
+ rxq->next_to_update << HINIC3_NORMAL_RQ_WQE);
+ rxq->delta -= i;
+ rxq->next_to_alloc = rxq->next_to_update;
+ }
+
+ return i;
+}
+
+static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
+ struct hinic3_rx_info *rx_info,
+ struct sk_buff *skb, u32 size)
+{
+ struct page *page;
+ u8 *va;
+
+ page = rx_info->page;
+ va = (u8 *)page_address(page) + rx_info->page_offset;
+ net_prefetch(va);
+
+ page_pool_dma_sync_for_cpu(rxq->page_pool, page, rx_info->page_offset,
+ rxq->buf_len);
+
+ if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+ memcpy(__skb_put(skb, size), va,
+ ALIGN(size, sizeof(long)));
+ page_pool_put_full_page(rxq->page_pool, page, false);
+
+ return;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_info->page_offset, size, rxq->buf_len);
+ skb_mark_for_recycle(skb);
+}
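+
+/* Note on the copy/frag split above: payloads up to HINIC3_RX_HDR_SIZE
+ * are copied into the skb linear area and the page goes back to the pool
+ * immediately; larger payloads are attached as page frags and recycled
+ * later via skb_mark_for_recycle().
+ */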
+
+static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb,
+ u32 sge_num, u32 pkt_len)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 temp_pkt_len = pkt_len;
+ u32 temp_sge_num = sge_num;
+ u32 sw_ci;
+ u32 size;
+
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ while (temp_sge_num) {
+ rx_info = &rxq->rx_info[sw_ci];
+ sw_ci = (sw_ci + 1) & rxq->q_mask;
+ if (unlikely(temp_pkt_len > rxq->buf_len)) {
+ size = rxq->buf_len;
+ temp_pkt_len -= rxq->buf_len;
+ } else {
+ size = temp_pkt_len;
+ }
+
+ hinic3_add_rx_frag(rxq, rx_info, skb, size);
+
+ /* clear contents of buffer_info */
+ rx_info->page = NULL;
+ temp_sge_num--;
+ }
+}
+
+static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len)
+{
+ u32 sge_num;
+
+ sge_num = pkt_len >> rxq->buf_len_shift;
+ sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0;
+
+ return sge_num;
+}
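+
+/* Worked example: with buf_len = 2048 (buf_len_shift = 11), a 5000 byte
+ * packet needs 5000 >> 11 = 2 full buffers plus one more for the
+ * remaining 904 bytes, so sge_num = 3. This is DIV_ROUND_UP(pkt_len,
+ * buf_len) expressed with shift/mask arithmetic.
+ */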
+
+static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
+ u32 pkt_len)
+{
+ struct sk_buff *skb;
+ u32 sge_num;
+
+ skb = napi_alloc_skb(&rxq->irq_cfg->napi, HINIC3_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ sge_num = hinic3_get_sge_num(rxq, pkt_len);
+
+ net_prefetchw(skb->data);
+ packaging_skb(rxq, skb, sge_num, pkt_len);
+
+ rxq->cons_idx += sge_num;
+ rxq->delta += sge_num;
+
+ return skb;
+}
+
+static void hinic3_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ va = skb_frag_address(frag);
+
+ /* we need the header to contain the greater of ETH_HLEN or
+ * 60 bytes when skb->len is less than 60, for skb_pad.
+ */
+ pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type,
+ u32 status, struct sk_buff *skb)
+{
+ u32 pkt_fmt = RQ_CQE_OFFLOAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT);
+ u32 pkt_type = RQ_CQE_OFFLOAD_TYPE_GET(offload_type, PKT_TYPE);
+ u32 ip_type = RQ_CQE_OFFLOAD_TYPE_GET(offload_type, IP_TYPE);
+ u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR);
+ struct net_device *netdev = rxq->netdev;
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (unlikely(csum_err)) {
+ /* pkt type is recognized by HW, and csum is wrong */
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ if (ip_type == HINIC3_RX_INVALID_IP_TYPE ||
+ !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL ||
+ pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ return;
+ }
+
+ switch (pkt_type) {
+ case HINIC3_RX_TCP_PKT:
+ case HINIC3_RX_UDP_PKT:
+ case HINIC3_RX_SCTP_PKT:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ }
+}
+
+static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ __be16 proto;
+
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb),
+ num_lro);
+ skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ?
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = num_lro;
+}
+
+static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
+ u32 pkt_len, u32 vlan_len, u32 status)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct sk_buff *skb;
+ u32 offload_type;
+ u16 num_lro;
+
+ skb = hinic3_fetch_rx_buffer(rxq, pkt_len);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ hinic3_pull_tail(skb);
+
+ offload_type = rx_cqe->offload_type;
+ hinic3_rx_csum(rxq, offload_type, status, skb);
+
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ hinic3_lro_set_gso_params(skb, num_lro);
+
+ skb_record_rx_queue(skb, rxq->q_id);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb_has_frag_list(skb)) {
+ napi_gro_flush(&rxq->irq_cfg->napi, false);
+ netif_receive_skb(skb);
+ } else {
+ napi_gro_receive(&rxq->irq_cfg->napi, skb);
+ }
+
+ return 0;
+}
+
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+ u32 sw_ci, status, pkt_len, vlan_len;
+ struct hinic3_rq_cqe *rx_cqe;
+ u32 num_wqe = 0;
+ int nr_pkts = 0;
+ u16 num_lro;
+
+ while (likely(nr_pkts < budget)) {
+ sw_ci = rxq->cons_idx & rxq->q_mask;
+ rx_cqe = rxq->cqe_arr + sw_ci;
+ status = rx_cqe->status;
+ if (!RQ_CQE_STATUS_GET(status, RXDONE))
+ break;
+
+ /* make sure we read rx_done before packet length */
+ rmb();
+
+ vlan_len = rx_cqe->vlan_len;
+ pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
+ if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
+ break;
+
+ nr_pkts++;
+ num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
+ if (num_lro)
+ num_wqe += hinic3_get_sge_num(rxq, pkt_len);
+
+ rx_cqe->status = 0;
+
+ if (num_wqe >= nic_dev->lro_replenish_thld)
+ break;
+ }
+
+ if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
+ hinic3_rx_fill_buffers(rxq);
+
+ return nr_pkts;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
new file mode 100644
index 000000000000..1cca21858d40
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RX_H_
+#define _HINIC3_RX_H_
+
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+
+#define RQ_CQE_OFFLOAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0)
+#define RQ_CQE_OFFLOAD_TYPE_IP_TYPE_MASK GENMASK(6, 5)
+#define RQ_CQE_OFFLOAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8)
+#define RQ_CQE_OFFLOAD_TYPE_VLAN_EN_MASK BIT(21)
+#define RQ_CQE_OFFLOAD_TYPE_GET(val, member) \
+ FIELD_GET(RQ_CQE_OFFLOAD_TYPE_##member##_MASK, val)
+
+#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0)
+#define RQ_CQE_SGE_LEN_MASK GENMASK(31, 16)
+#define RQ_CQE_SGE_GET(val, member) \
+ FIELD_GET(RQ_CQE_SGE_##member##_MASK, val)
+
+#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0)
+#define RQ_CQE_STATUS_NUM_LRO_MASK GENMASK(23, 16)
+#define RQ_CQE_STATUS_RXDONE_MASK BIT(31)
+#define RQ_CQE_STATUS_GET(val, member) \
+ FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val)
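+
+/* Decoding sketch: a status word of 0x80020000 carries RXDONE = 1,
+ * NUM_LRO = 2 and CSUM_ERR = 0:
+ *
+ *   RQ_CQE_STATUS_GET(0x80020000, RXDONE)  -> 1
+ *   RQ_CQE_STATUS_GET(0x80020000, NUM_LRO) -> 2
+ */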
+
+/* RX Completion information that is provided by HW for a specific RX WQE */
+struct hinic3_rq_cqe {
+ u32 status;
+ u32 vlan_len;
+ u32 offload_type;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 rsvd6;
+ u32 pkt_info;
+};
+
+struct hinic3_rq_wqe {
+ u32 buf_hi_addr;
+ u32 buf_lo_addr;
+ u32 cqe_hi_addr;
+ u32 cqe_lo_addr;
+};
+
+struct hinic3_rx_info {
+ struct page *page;
+ u32 page_offset;
+};
+
+struct hinic3_rxq {
+ struct net_device *netdev;
+
+ u16 q_id;
+ u32 q_depth;
+ u32 q_mask;
+
+ u16 buf_len;
+ u32 buf_len_shift;
+
+ u32 cons_idx;
+ u32 delta;
+
+ u32 irq_id;
+ u16 msix_entry_idx;
+
+ /* cqe_arr and rx_info are arrays of rq_depth elements. Each element is
+ * statically associated (by index) to a specific rq_wqe.
+ */
+ struct hinic3_rq_cqe *cqe_arr;
+ struct hinic3_rx_info *rx_info;
+ struct page_pool *page_pool;
+
+ struct hinic3_io_queue *rq;
+
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 next_to_alloc;
+ u16 next_to_update;
+ struct device *dev; /* device for DMA mapping */
+
+ dma_addr_t cqe_start_paddr;
+} ____cacheline_aligned;
+
+int hinic3_alloc_rxqs(struct net_device *netdev);
+void hinic3_free_rxqs(struct net_device *netdev);
+
+int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
new file mode 100644
index 000000000000..ae08257dd1d2
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <net/ip6_checksum.h>
+#include <net/ipv6.h>
+#include <net/netdev_queues.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_nic_io.h"
+#include "hinic3_tx.h"
+#include "hinic3_wq.h"
+
+#define MIN_SKB_LEN 32
+
+int hinic3_alloc_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id, num_txqs = nic_dev->max_qps;
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_txq *txq;
+
+ if (!num_txqs) {
+ dev_err(hwdev->dev, "Cannot allocate zero size txqs\n");
+ return -EINVAL;
+ }
+
+ nic_dev->txqs = kcalloc(num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL);
+ if (!nic_dev->txqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_txqs; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ txq->netdev = netdev;
+ txq->q_id = q_id;
+ txq->q_depth = nic_dev->q_params.sq_depth;
+ txq->q_mask = nic_dev->q_params.sq_depth - 1;
+ txq->dev = &pdev->dev;
+ }
+
+ return 0;
+}
+
+void hinic3_free_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->txqs);
+}
+
+static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs,
+ dma_addr_t addr, u32 len)
+{
+ buf_descs->hi_addr = upper_32_bits(addr);
+ buf_descs->lo_addr = lower_32_bits(addr);
+ buf_descs->len = len;
+}
+
+static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct hinic3_txq *txq,
+ struct hinic3_tx_info *tx_info,
+ struct hinic3_sq_wqe_combo *wqe_combo)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+ struct hinic3_sq_bufdesc *buf_desc = wqe_combo->bds_head;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dma_info *dma_info = tx_info->dma_info;
+ struct pci_dev *pdev = nic_dev->pdev;
+ skb_frag_t *frag;
+ u32 i, idx;
+ int err;
+
+ dma_info[0].dma = dma_map_single(&pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[0].dma))
+ return -EFAULT;
+
+ dma_info[0].len = skb_headlen(skb);
+
+ wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma);
+ wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma);
+
+ wqe_desc->ctrl_len = dma_info[0].len;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &(skb_shinfo(skb)->frags[i]);
+ if (unlikely(i == wqe_combo->first_bds_num))
+ buf_desc = wqe_combo->bds_sec2;
+
+ idx = i + 1;
+ dma_info[idx].dma = skb_frag_dma_map(&pdev->dev, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_info[idx].dma)) {
+ err = -EFAULT;
+ goto err_unmap_page;
+ }
+ dma_info[idx].len = skb_frag_size(frag);
+
+ hinic3_set_buf_desc(buf_desc, dma_info[idx].dma,
+ dma_info[idx].len);
+ buf_desc++;
+ }
+
+ return 0;
+
+err_unmap_page:
+ while (idx > 1) {
+ idx--;
+ dma_unmap_page(&pdev->dev, dma_info[idx].dma,
+ dma_info[idx].len, DMA_TO_DEVICE);
+ }
+ dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len,
+ DMA_TO_DEVICE);
+ return err;
+}
+
+static void hinic3_tx_unmap_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct hinic3_dma_info *dma_info)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ int i;
+
+ for (i = 1; i <= skb_shinfo(skb)->nr_frags; i++)
+ dma_unmap_page(&pdev->dev,
+ dma_info[i].dma,
+ dma_info[i].len, DMA_TO_DEVICE);
+
+ dma_unmap_single(&pdev->dev, dma_info[0].dma,
+ dma_info[0].len, DMA_TO_DEVICE);
+}
+
+union hinic3_ip {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union hinic3_l4 {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+};
+
+enum hinic3_l3_type {
+ HINIC3_L3_UNKNOWN = 0,
+ HINIC3_L3_IP6_PKT = 1,
+ HINIC3_L3_IP4_PKT_NO_CSUM = 2,
+ HINIC3_L3_IP4_PKT_CSUM = 3,
+};
+
+enum hinic3_l4_offload_type {
+ HINIC3_L4_OFFLOAD_DISABLE = 0,
+ HINIC3_L4_OFFLOAD_TCP = 1,
+ HINIC3_L4_OFFLOAD_STCP = 2,
+ HINIC3_L4_OFFLOAD_UDP = 3,
+};
+
+/* initialize l4 offset and offload */
+static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4,
+ u8 l4_proto, u32 *offset,
+ enum hinic3_l4_offload_type *l4_offload)
+{
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ *l4_offload = HINIC3_L4_OFFLOAD_TCP;
+ /* To be same with TSO, payload offset begins from payload */
+ *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) +
+ TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+
+ case IPPROTO_UDP:
+ *l4_offload = HINIC3_L4_OFFLOAD_UDP;
+ *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+ break;
+ default:
+ *l4_offload = HINIC3_L4_OFFLOAD_DISABLE;
+ *offset = 0;
+ }
+}
+
+static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
+ struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->encapsulation) {
+ union hinic3_ip ip;
+ u8 l4_proto;
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+
+ ip.hdr = skb_network_header(skb);
+ if (ip.v4->version == 4) {
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ union hinic3_l4 l4;
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
+ l4_proto = IPPROTO_RAW;
+ }
+
+ if (l4_proto != IPPROTO_UDP ||
+ ((struct udphdr *)skb_transport_header(skb))->dest !=
+ VXLAN_OFFLOAD_PORT_LE) {
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
+ }
+
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+
+ return 1;
+}
+
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip,
+ union hinic3_l4 *l4,
+ enum hinic3_l3_type *l3_type, u8 *l4_proto)
+{
+ unsigned char *exthdr;
+ __be16 frag_off;
+
+ if (ip->v4->version == 4) {
+ *l3_type = HINIC3_L3_IP4_PKT_CSUM;
+ *l4_proto = ip->v4->protocol;
+ } else if (ip->v4->version == 6) {
+ *l3_type = HINIC3_L3_IP6_PKT;
+ exthdr = ip->hdr + sizeof(*ip->v6);
+ *l4_proto = ip->v6->nexthdr;
+ if (exthdr != l4->hdr) {
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ l4_proto, &frag_off);
+ }
+ } else {
+ *l3_type = HINIC3_L3_UNKNOWN;
+ *l4_proto = 0;
+ }
+}
+
+static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info,
+ enum hinic3_l4_offload_type l4_offload,
+ u32 offset, u32 mss)
+{
+ if (l4_offload == HINIC3_L4_OFFLOAD_TCP) {
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) {
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ }
+
+ /* enable L3 calculation */
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN);
+
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF);
+
+ /* set MSS value */
+ *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+}
+
+static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
+{
+ return (ip->v4->version == 4) ?
+ csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+ csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info,
+ struct sk_buff *skb)
+{
+ enum hinic3_l4_offload_type l4_offload;
+ enum hinic3_l3_type l3_type;
+ union hinic3_ip ip;
+ union hinic3_l4 l4;
+ u8 l4_proto;
+ u32 offset;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (skb->encapsulation) {
+ u32 gso_type = skb_shinfo(skb)->gso_type;
+ /* L3 checksum is always enabled */
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+
+ l4.hdr = skb_transport_header(skb);
+ ip.hdr = skb_network_header(skb);
+
+ if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+ task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN);
+ }
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
+
+ get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto);
+
+ if (l4_proto == IPPROTO_TCP)
+ l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+ get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload);
+
+ hinic3_set_tso_info(task, queue_info, l4_offload, offset,
+ skb_shinfo(skb)->gso_size);
+
+ return 1;
+}
+
+static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task,
+ u16 vlan_tag, u8 vlan_tpid)
+{
+ /* vlan_tpid: 0=select TPID0 in IPSU, 1=select TPID1 in IPSU
+ * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU,
+ * 4=select TPID4 in IPSU
+ */
+ task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
+ SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID);
+}
+
+static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
+ u32 *queue_info, struct hinic3_txq *txq)
+{
+ u32 offload = 0;
+ int tso_cs_en;
+
+ task->pkt_info0 = 0;
+ task->ip_identify = 0;
+ task->rsvd = 0;
+ task->vlan_offload = 0;
+
+ tso_cs_en = hinic3_tso(task, queue_info, skb);
+ if (tso_cs_en < 0) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ } else if (tso_cs_en) {
+ offload |= HINIC3_TX_OFFLOAD_TSO;
+ } else {
+ tso_cs_en = hinic3_tx_csum(txq, task, skb);
+ if (tso_cs_en)
+ offload |= HINIC3_TX_OFFLOAD_CSUM;
+ }
+
+#define VLAN_INSERT_MODE_MAX 5
+ if (unlikely(skb_vlan_tag_present(skb))) {
+ /* select vlan insert mode by qid, default 802.1Q Tag type */
+ hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb),
+ txq->q_id % VLAN_INSERT_MODE_MAX);
+ offload |= HINIC3_TX_OFFLOAD_VLAN;
+ }
+
+ if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) >
+ SQ_CTRL_MAX_PLDOFF)) {
+ offload = HINIC3_TX_OFFLOAD_INVALID;
+ return offload;
+ }
+
+ return offload;
+}
+
+static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq,
+ u16 curr_pi, u16 wqebb_cnt)
+{
+ u16 owner = sq->owner;
+
+ if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth))
+ sq->owner = !sq->owner;
+
+ return owner;
+}
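+
+/* Example of the wrap case: with q_depth = 256, curr_pi = 254 and
+ * wqebb_cnt = 4 reach past the ring end, so sq->owner flips while the
+ * pre-flip value is stamped into this WQE. The stored bit lets HW tell
+ * WQEs of the current pass from stale ones (inferred from the
+ * toggle-per-wrap convention; not spelled out in this submission).
+ */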
+
+static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
+ struct hinic3_sq_wqe_combo *wqe_combo,
+ u32 offload, u16 num_sge, u16 *curr_pi)
+{
+ struct hinic3_sq_bufdesc *first_part_wqebbs, *second_part_wqebbs;
+ u16 first_part_wqebbs_num, tmp_pi;
+
+ wqe_combo->ctrl_bd0 = hinic3_wq_get_one_wqebb(&txq->sq->wq, curr_pi);
+ if (!offload && num_sge == 1) {
+ wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE;
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, 1);
+ }
+
+ wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE;
+
+ if (offload) {
+ wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq,
+ &tmp_pi);
+ wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES;
+ } else {
+ wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS;
+ }
+
+ if (num_sge > 1) {
+ /* the first wqebb contains bd0, and the bd size equals the sq
+ * wqebb size, so we use (num_sge - 1) as the wanted wqebb_cnt
+ */
+ hinic3_wq_get_multi_wqebbs(&txq->sq->wq, num_sge - 1, &tmp_pi,
+ &first_part_wqebbs,
+ &second_part_wqebbs,
+ &first_part_wqebbs_num);
+ wqe_combo->bds_head = first_part_wqebbs;
+ wqe_combo->bds_sec2 = second_part_wqebbs;
+ wqe_combo->first_bds_num = first_part_wqebbs_num;
+ }
+
+ return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi,
+ num_sge + !!offload);
+}
+
+static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
+ u32 queue_info, int nr_descs, u16 owner)
+{
+ struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
+
+ if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
+ wqe_desc->ctrl_len |=
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER);
+
+ /* a compact wqe still transfers queue_info to the chip, so clear it */
+ wqe_desc->queue_info = 0;
+ return;
+ }
+
+ wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
+ SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER);
+
+ wqe_desc->queue_info = queue_info;
+ wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC);
+
+ if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
+ wqe_desc->queue_info |=
+ SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS);
+ } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) <
+ HINIC3_TX_MSS_MIN) {
+ /* mss should not be less than 80 */
+ wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ wqe_desc->queue_info |=
+ SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS);
+ }
+}
+
+static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct hinic3_txq *txq)
+{
+ struct hinic3_sq_wqe_combo wqe_combo = {};
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_txq *tx_q = txq;
+ u32 offload, queue_info = 0;
+ struct hinic3_sq_task task;
+ u16 wqebb_cnt, num_sge;
+ u16 saved_wq_prod_idx;
+ u16 owner, pi = 0;
+ u8 saved_sq_owner;
+ int err;
+
+ if (unlikely(skb->len < MIN_SKB_LEN)) {
+ if (skb_pad(skb, MIN_SKB_LEN - skb->len))
+ goto err_out;
+
+ skb->len = MIN_SKB_LEN;
+ }
+
+ num_sge = skb_shinfo(skb)->nr_frags + 1;
+ /* assume normal wqe format + 1 wqebb for task info */
+ wqebb_cnt = num_sge + 1;
+
+ if (unlikely(hinic3_wq_free_wqebbs(&txq->sq->wq) < wqebb_cnt)) {
+ if (likely(wqebb_cnt > txq->tx_stop_thrs))
+ txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);
+
+ netif_subqueue_try_stop(netdev, tx_q->sq->q_id,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_start_thrs);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ offload = hinic3_tx_offload(skb, &task, &queue_info, txq);
+ if (unlikely(offload == HINIC3_TX_OFFLOAD_INVALID)) {
+ goto err_drop_pkt;
+ } else if (!offload) {
+ wqebb_cnt -= 1;
+ if (unlikely(num_sge == 1 &&
+ skb->len > HINIC3_COMPACT_WQE_SKB_MAX_LEN))
+ goto err_drop_pkt;
+ }
+
+ saved_wq_prod_idx = txq->sq->wq.prod_idx;
+ saved_sq_owner = txq->sq->owner;
+
+ owner = hinic3_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi);
+ if (offload)
+ *wqe_combo.task = task;
+
+ tx_info = &txq->tx_info[pi];
+ tx_info->skb = skb;
+ tx_info->wqebb_cnt = wqebb_cnt;
+
+ err = hinic3_tx_map_skb(netdev, skb, txq, tx_info, &wqe_combo);
+ if (err) {
+ /* Rollback work queue to reclaim the wqebb we did not use */
+ txq->sq->wq.prod_idx = saved_wq_prod_idx;
+ txq->sq->owner = saved_sq_owner;
+ goto err_drop_pkt;
+ }
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id),
+ skb->len);
+ netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_stop_thrs,
+ tx_q->tx_start_thrs);
+
+ hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
+ hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ,
+ hinic3_get_sq_local_pi(txq->sq));
+
+ return NETDEV_TX_OK;
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+
+err_out:
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 q_id = skb_get_queue_mapping(skb);
+
+ if (unlikely(!netif_carrier_ok(netdev)))
+ goto err_drop_pkt;
+
+ if (unlikely(q_id >= nic_dev->q_params.num_qps))
+ goto err_drop_pkt;
+
+ return hinic3_send_one_skb(skb, netdev, &nic_dev->txqs[q_id]);
+
+err_drop_pkt:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq)
+{
+ u16 sw_pi, hw_ci;
+
+ sw_pi = hinic3_get_sq_local_pi(sq);
+ hw_ci = hinic3_get_sq_hw_ci(sq);
+
+ return sw_pi == hw_ci;
+}
+
+#define HINIC3_FLUSH_QUEUE_POLL_SLEEP_US 10000
+#define HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US 10000000
+static int hinic3_stop_sq(struct hinic3_txq *txq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev);
+ int err, rc;
+
+ err = read_poll_timeout(hinic3_force_drop_tx_pkt, rc,
+ is_hw_complete_sq_process(txq->sq) || rc,
+ HINIC3_FLUSH_QUEUE_POLL_SLEEP_US,
+ HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US,
+ true, nic_dev->hwdev);
+ if (rc)
+ return rc;
+
+ return err;
+}
+
+/* packet transmission should be stopped before calling this function */
+void hinic3_flush_txqs(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 qid;
+ int err;
+
+ for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) {
+ err = hinic3_stop_sq(&nic_dev->txqs[qid]);
+ netdev_tx_reset_subqueue(netdev, qid);
+ if (err)
+ netdev_err(netdev, "Failed to stop sq%u\n", qid);
+ }
+}
+
+#define HINIC3_BDS_PER_SQ_WQEBB \
+ (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc))
+
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
+{
+ struct net_device *netdev = txq->netdev;
+ u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_txq *tx_q = txq;
+ unsigned int bytes_compl = 0;
+ unsigned int pkts = 0;
+ u16 wqebb_cnt = 0;
+
+ hw_ci = hinic3_get_sq_hw_ci(txq->sq);
+ dma_rmb();
+ sw_ci = hinic3_get_sq_local_ci(txq->sq);
+
+ do {
+ tx_info = &txq->tx_info[sw_ci];
+
+ /* Did all wqebb of this wqe complete? */
+ if (hw_ci == sw_ci ||
+ ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt)
+ break;
+
+ sw_ci = (sw_ci + tx_info->wqebb_cnt) & txq->q_mask;
+ net_prefetch(&txq->tx_info[sw_ci]);
+
+ wqebb_cnt += tx_info->wqebb_cnt;
+ bytes_compl += tx_info->skb->len;
+ pkts++;
+
+ hinic3_tx_unmap_skb(netdev, tx_info->skb, tx_info->dma_info);
+ napi_consume_skb(tx_info->skb, budget);
+ tx_info->skb = NULL;
+ } while (likely(pkts < HINIC3_TX_POLL_WEIGHT));
+
+ hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt);
+
+ netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl,
+ hinic3_wq_free_wqebbs(&tx_q->sq->wq),
+ tx_q->tx_start_thrs);
+
+ return pkts == HINIC3_TX_POLL_WEIGHT;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
new file mode 100644
index 000000000000..9e505cc19dd5
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_TX_H_
+#define _HINIC3_TX_H_
+
+#include <linux/bitops.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/checksum.h>
+
+#define VXLAN_OFFLOAD_PORT_LE cpu_to_be16(4789)
+#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2
+#define TRANSPORT_OFFSET(l4_hdr, skb) ((l4_hdr) - (skb)->data)
+
+#define HINIC3_COMPACT_WQE_SKB_MAX_LEN 16383
+#define HINIC3_TX_POLL_WEIGHT 64
+#define HINIC3_DEFAULT_STOP_THRS 6
+#define HINIC3_DEFAULT_START_THRS 24
+
+enum sq_wqe_data_format {
+ SQ_NORMAL_WQE = 0,
+};
+
+enum sq_wqe_ec_type {
+ SQ_WQE_COMPACT_TYPE = 0,
+ SQ_WQE_EXTENDED_TYPE = 1,
+};
+
+enum sq_wqe_tasksect_len_type {
+ SQ_WQE_TASKSECT_46BITS = 0,
+ SQ_WQE_TASKSECT_16BYTES = 1,
+};
+
+enum hinic3_tx_offload_type {
+ HINIC3_TX_OFFLOAD_TSO = BIT(0),
+ HINIC3_TX_OFFLOAD_CSUM = BIT(1),
+ HINIC3_TX_OFFLOAD_VLAN = BIT(2),
+ HINIC3_TX_OFFLOAD_INVALID = BIT(3),
+ HINIC3_TX_OFFLOAD_ESP = BIT(4),
+};
+
+#define SQ_CTRL_BUFDESC_NUM_MASK GENMASK(26, 19)
+#define SQ_CTRL_TASKSECT_LEN_MASK BIT(27)
+#define SQ_CTRL_DATA_FORMAT_MASK BIT(28)
+#define SQ_CTRL_EXTENDED_MASK BIT(30)
+#define SQ_CTRL_OWNER_MASK BIT(31)
+#define SQ_CTRL_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_##member##_MASK, val)
+
+#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK GENMASK(9, 2)
+#define SQ_CTRL_QUEUE_INFO_UFO_MASK BIT(10)
+#define SQ_CTRL_QUEUE_INFO_TSO_MASK BIT(11)
+#define SQ_CTRL_QUEUE_INFO_MSS_MASK GENMASK(26, 13)
+#define SQ_CTRL_QUEUE_INFO_UC_MASK BIT(28)
+
+#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
+ FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
+ FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+
+#define SQ_CTRL_MAX_PLDOFF 221
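+
+/* Composition sketch: a TSO queue_info word for payload offset 66 and
+ * MSS 1448 would be built as
+ *
+ *   queue_info = SQ_CTRL_QUEUE_INFO_SET(1, TSO) |
+ *                SQ_CTRL_QUEUE_INFO_SET(66 >> 1, PLDOFF) |
+ *                SQ_CTRL_QUEUE_INFO_SET(1448, MSS);
+ *
+ * mirroring what hinic3_set_tso_info() does in hinic3_tx.c.
+ */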
+
+#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK BIT(19)
+#define SQ_TASK_INFO0_INNER_L4_EN_MASK BIT(24)
+#define SQ_TASK_INFO0_INNER_L3_EN_MASK BIT(25)
+#define SQ_TASK_INFO0_OUT_L4_EN_MASK BIT(27)
+#define SQ_TASK_INFO0_OUT_L3_EN_MASK BIT(28)
+#define SQ_TASK_INFO0_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val)
+
+#define SQ_TASK_INFO3_VLAN_TAG_MASK GENMASK(15, 0)
+#define SQ_TASK_INFO3_VLAN_TPID_MASK GENMASK(18, 16)
+#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK BIT(19)
+#define SQ_TASK_INFO3_SET(val, member) \
+ FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val)
+
+struct hinic3_sq_wqe_desc {
+ u32 ctrl_len;
+ u32 queue_info;
+ u32 hi_addr;
+ u32 lo_addr;
+};
+
+struct hinic3_sq_task {
+ u32 pkt_info0;
+ u32 ip_identify;
+ u32 rsvd;
+ u32 vlan_offload;
+};
+
+struct hinic3_sq_wqe_combo {
+ struct hinic3_sq_wqe_desc *ctrl_bd0;
+ struct hinic3_sq_task *task;
+ struct hinic3_sq_bufdesc *bds_head;
+ struct hinic3_sq_bufdesc *bds_sec2;
+ u16 first_bds_num;
+ u32 wqe_type;
+ u32 task_type;
+};
+
+struct hinic3_dma_info {
+ dma_addr_t dma;
+ u32 len;
+};
+
+struct hinic3_tx_info {
+ struct sk_buff *skb;
+ u16 wqebb_cnt;
+ struct hinic3_dma_info *dma_info;
+};
+
+struct hinic3_txq {
+ struct net_device *netdev;
+ struct device *dev;
+
+ u16 q_id;
+ u16 tx_stop_thrs;
+ u16 tx_start_thrs;
+ u32 q_mask;
+ u32 q_depth;
+
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_io_queue *sq;
+} ____cacheline_aligned;
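+
+/* Flow-control note: a queue is stopped once free WQEBBs drop below
+ * tx_stop_thrs and woken only after completions push the count past
+ * tx_start_thrs (defaults 6 and 24 above); the gap gives hysteresis so
+ * the queue does not flap on every packet.
+ */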
+
+int hinic3_alloc_txqs(struct net_device *netdev);
+void hinic3_free_txqs(struct net_device *netdev);
+
+netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);
+void hinic3_flush_txqs(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
new file mode 100644
index 000000000000..2ac7efcd1365
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/dma-mapping.h>
+
+#include "hinic3_hwdev.h"
+#include "hinic3_wq.h"
+
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num)
+{
+ u32 idx, remaining;
+
+ idx = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx += num_wqebbs;
+ *prod_idx = idx;
+ *first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining);
+ if (likely(remaining >= num_wqebbs)) {
+ *first_part_wqebbs_num = num_wqebbs;
+ *second_part_wqebbs = NULL;
+ } else {
+ *first_part_wqebbs_num = remaining;
+ idx += remaining;
+ *second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
new file mode 100644
index 000000000000..ab37893efd7e
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_WQ_H_
+#define _HINIC3_WQ_H_
+
+#include <linux/io.h>
+
+#include "hinic3_queue_common.h"
+
+struct hinic3_sq_bufdesc {
+ /* 31-bit length; L2NIC uses only length[17:0] */
+ u32 len;
+ u32 rsvd;
+ u32 hi_addr;
+ u32 lo_addr;
+};
+
+/* Work queue is used to submit elements (tx, rx, cmd) to hw.
+ * Driver is the producer that advances prod_idx. cons_idx is advanced when
+ * HW reports completions of previously submitted elements.
+ */
+struct hinic3_wq {
+ struct hinic3_queue_pages qpages;
+ /* Unmasked producer/consumer indices that advance continuously and
+ * wrap by natural integer overflow, regardless of queue depth.
+ */
+ u16 cons_idx;
+ u16 prod_idx;
+
+ u32 q_depth;
+ u16 idx_mask;
+
+ /* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical
+ * Address (CLA) using 1 of 2 levels:
+ * level 0 - direct mapping of single wq page
+ * level 1 - indirect mapping of multiple pages via additional page
+ * table.
+ * When wq uses level 1, wq_block will hold the allocated indirection
+ * table.
+ */
+ dma_addr_t wq_block_paddr;
+ __be64 *wq_block_vaddr;
+} ____cacheline_aligned;
+
+/* Get number of elements in work queue that are in-use. */
+static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq)
+{
+ return READ_ONCE(wq->prod_idx) - READ_ONCE(wq->cons_idx);
+}
+
+static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq)
+{
+ /* Don't allow queue to become completely full, report (free - 1). */
+ return wq->q_depth - hinic3_wq_get_used(wq) - 1;
+}
+
+static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi)
+{
+ *pi = wq->prod_idx & wq->idx_mask;
+ wq->prod_idx++;
+ return get_q_element(&wq->qpages, *pi, NULL);
+}
+
+static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs)
+{
+ wq->cons_idx += num_wqebbs;
+}
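+
+/* Lifecycle sketch: the producer claims a WQEBB, fills it and the
+ * completion path returns it:
+ *
+ *   u16 pi;
+ *   void *wqebb = hinic3_wq_get_one_wqebb(wq, &pi);
+ *   // ... fill the wqebb, ring the doorbell ...
+ *   hinic3_wq_put_wqebbs(wq, 1);
+ *
+ * Because prod_idx/cons_idx are unmasked u16 values, the in-use count
+ * prod_idx - cons_idx stays correct across natural wrap-around.
+ */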
+
+void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
+ u16 num_wqebbs, u16 *prod_idx,
+ struct hinic3_sq_bufdesc **first_part_wqebbs,
+ struct hinic3_sq_bufdesc **second_part_wqebbs,
+ u16 *first_part_wqebbs_num);
+
+#endif
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index c0c112d95b89..4f4b23465c47 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -27,6 +27,19 @@ config IBMVETH
To compile this driver as a module, choose M here. The module will
be called ibmveth.
+config IBMVETH_KUNIT_TEST
+ bool "KUnit test for IBM LAN Virtual Ethernet support" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on KUNIT=y && IBMVETH=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the IBM LAN Virtual Ethernet driver.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
source "drivers/net/ethernet/ibm/emac/Kconfig"
config EHEA
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 04192190beba..24046fe16634 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,8 +39,6 @@
#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
- bool reuse);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
@@ -231,7 +229,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
index = pool->free_map[free_index];
skb = NULL;
- BUG_ON(index == IBM_VETH_INVALID_MAP);
+ if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+ schedule_work(&adapter->work);
+ goto bad_index_failure;
+ }
/* are we allocating a new buffer or recycling an old one */
if (pool->skbuff[index])
@@ -300,6 +301,7 @@ failure:
DMA_FROM_DEVICE);
dev_kfree_skb_any(pool->skbuff[index]);
pool->skbuff[index] = NULL;
+bad_index_failure:
adapter->replenish_add_buff_failure++;
mb();
@@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
}
}
-/* remove a buffer from a pool */
-static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
- u64 correlator, bool reuse)
+/**
+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
+ * @adapter: adapter instance
+ * @correlator: identifies pool and index
+ * @reuse: whether to reuse buffer
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - correlator maps to a pool or index that is out of range
+ * * %-EFAULT - pool and index map to null skb
+ */
+static int ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
+ u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
unsigned int free_index;
struct sk_buff *skb;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return -EINVAL;
+ }
skb = adapter->rx_buff_pool[pool].skbuff[index];
- BUG_ON(skb == NULL);
+ if (WARN_ON(!skb)) {
+ schedule_work(&adapter->work);
+ return -EFAULT;
+ }
/* if we are going to reuse the buffer then keep the pointers around
* but mark index as available. replenish will see the skb pointer and
@@ -411,6 +429,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
mb();
atomic_dec(&(adapter->rx_buff_pool[pool].available));
+
+ return 0;
}
/* get the current buffer on the rx queue */
@@ -420,24 +440,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
+ if (WARN_ON(pool >= IBMVETH_NUM_BUFF_POOLS) ||
+ WARN_ON(index >= adapter->rx_buff_pool[pool].size)) {
+ schedule_work(&adapter->work);
+ return NULL;
+ }
return adapter->rx_buff_pool[pool].skbuff[index];
}
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
- bool reuse)
+/**
+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
+ *
+ * @adapter: pointer to adapter
+ * @reuse: whether to reuse buffer
+ *
+ * Context: called from ibmveth_poll
+ *
+ * Return:
+ * * %0 - success
+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
+ */
+static int ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse)
{
u64 cor;
+ int rc;
cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
- ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ rc = ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
+ if (unlikely(rc))
+ return rc;
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
}
+
+ return 0;
}
static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
@@ -709,6 +749,35 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
+/**
+ * ibmveth_reset - Handle scheduled reset work
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
+ * ibmveth_close. It can't be called directly in a context that has
+ * already acquired rtnl_mutex or disabled its NAPI, or directly from
+ * a poll routine.
+ *
+ * Return: void
+ */
+static void ibmveth_reset(struct work_struct *w)
+{
+ struct ibmveth_adapter *adapter = container_of(w, struct ibmveth_adapter, work);
+ struct net_device *netdev = adapter->netdev;
+
+ netdev_dbg(netdev, "reset starting\n");
+
+ rtnl_lock();
+
+ dev_close(adapter->netdev);
+ dev_open(adapter->netdev, NULL);
+
+ rtnl_unlock();
+
+ netdev_dbg(netdev, "reset complete\n");
+}
+
static int ibmveth_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
@@ -1324,7 +1393,8 @@ restart_poll:
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
- ibmveth_rxq_harvest_buffer(adapter, true);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@@ -1334,6 +1404,8 @@ restart_poll:
__sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
+ if (unlikely(!skb))
+ break;
/* if the large packet bit is set in the rx queue
* descriptor, the mss will be written by PHYP eight
@@ -1357,10 +1429,12 @@ restart_poll:
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
- ibmveth_rxq_harvest_buffer(adapter, true);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, true)))
+ break;
skb = new_skb;
} else {
- ibmveth_rxq_harvest_buffer(adapter, false);
+ if (unlikely(ibmveth_rxq_harvest_buffer(adapter, false)))
+ break;
skb_reserve(skb, offset);
}
@@ -1407,7 +1481,10 @@ restart_poll:
* then check once more to make sure we are done.
*/
lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ if (WARN_ON(lpar_rc != H_SUCCESS)) {
+ schedule_work(&adapter->work);
+ goto out;
+ }
if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1428,7 +1505,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
if (napi_schedule_prep(&adapter->napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- BUG_ON(lpar_rc != H_SUCCESS);
+ WARN_ON(lpar_rc != H_SUCCESS);
__napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1670,6 +1747,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
+ INIT_WORK(&adapter->work, ibmveth_reset);
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
ibmveth_init_link_settings(netdev);
@@ -1762,6 +1840,8 @@ static void ibmveth_remove(struct vio_dev *dev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
int i;
+ cancel_work_sync(&adapter->work);
+
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
kobject_put(&adapter->rx_buff_pool[i].kobj);
@@ -1791,6 +1871,26 @@ static ssize_t veth_pool_show(struct kobject *kobj,
return 0;
}
+/**
+ * veth_pool_store - sysfs store handler for pool attributes
+ * @kobj: kobject embedded in pool
+ * @attr: attribute being changed
+ * @buf: value being stored
+ * @count: length of @buf in bytes
+ *
+ * Stores new value in pool attribute. Verifies the range of the new value for
+ * size and buff_size. Verifies that at least one pool remains available to
+ * receive MTU-sized packets.
+ *
+ * Context: Process context.
+ * Takes and releases rtnl_mutex to ensure correct ordering of close
+ * and open calls.
+ * Return:
+ * * %-EPERM - Not allowed to disable all MTU-sized buffer pools
+ * * %-EINVAL - New pool size or buffer size is out of range
+ * * count - Return count for success
+ * * other - Return value from a failed ibmveth_open call
+ */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
@@ -1800,28 +1900,30 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
struct net_device *netdev = dev_get_drvdata(kobj_to_dev(kobj->parent));
struct ibmveth_adapter *adapter = netdev_priv(netdev);
long value = simple_strtol(buf, NULL, 10);
+ bool change = false;
+ u32 newbuff_size;
+ u32 oldbuff_size;
+ int newactive;
+ int oldactive;
+ u32 newsize;
+ u32 oldsize;
long rc;
rtnl_lock();
+ oldbuff_size = pool->buff_size;
+ oldactive = pool->active;
+ oldsize = pool->size;
+
+ newbuff_size = oldbuff_size;
+ newactive = oldactive;
+ newsize = oldsize;
+
if (attr == &veth_active_attr) {
- if (value && !pool->active) {
- if (netif_running(netdev)) {
- if (ibmveth_alloc_buffer_pool(pool)) {
- netdev_err(netdev,
- "unable to alloc pool\n");
- rc = -ENOMEM;
- goto unlock_err;
- }
- pool->active = 1;
- ibmveth_close(netdev);
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->active = 1;
- }
- } else if (!value && pool->active) {
+ if (value && !oldactive) {
+ newactive = 1;
+ change = true;
+ } else if (!value && oldactive) {
int mtu = netdev->mtu + IBMVETH_BUFF_OH;
int i;
/* Make sure there is a buffer pool with buffers that
@@ -1841,43 +1943,44 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
goto unlock_err;
}
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->active = 0;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- }
- pool->active = 0;
+ newactive = 0;
+ change = true;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
rc = -EINVAL;
goto unlock_err;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->size = value;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->size = value;
- }
+ }
+ if (value != oldsize) {
+ newsize = value;
+ change = true;
}
} else if (attr == &veth_size_attr) {
if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
rc = -EINVAL;
goto unlock_err;
- } else {
- if (netif_running(netdev)) {
- ibmveth_close(netdev);
- pool->buff_size = value;
- rc = ibmveth_open(netdev);
- if (rc)
- goto unlock_err;
- } else {
- pool->buff_size = value;
+ }
+ if (value != oldbuff_size) {
+ newbuff_size = value;
+ change = true;
+ }
+ }
+
+ if (change) {
+ if (netif_running(netdev))
+ ibmveth_close(netdev);
+
+ pool->active = newactive;
+ pool->buff_size = newbuff_size;
+ pool->size = newsize;
+
+ if (netif_running(netdev)) {
+ rc = ibmveth_open(netdev);
+ if (rc) {
+ pool->active = oldactive;
+ pool->buff_size = oldbuff_size;
+ pool->size = oldsize;
+ goto unlock_err;
}
}
}
@@ -1962,3 +2065,132 @@ static void __exit ibmveth_module_exit(void)
module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);
+
+#ifdef CONFIG_IBMVETH_KUNIT_TEST
+#include <kunit/test.h>
+
+/**
+ * ibmveth_reset_kunit - reset routine for running in KUnit environment
+ *
+ * @w: pointer to work_struct embedded in adapter structure
+ *
+ * Context: Called in the KUnit environment. Does nothing.
+ *
+ * Return: void
+ */
+static void ibmveth_reset_kunit(struct work_struct *w)
+{
+ netdev_dbg(NULL, "reset_kunit starting\n");
+ netdev_dbg(NULL, "reset_kunit complete\n");
+}
+
+/**
+ * ibmveth_remove_buffer_from_pool_test - unit test for some of
+ * ibmveth_remove_buffer_from_pool
+ * @test: pointer to kunit structure
+ *
+ * Tests the error returns from ibmveth_remove_buffer_from_pool.
+ * ibmveth_remove_buffer_from_pool also calls WARN_ON, so dmesg should be
+ * checked to see that these warnings happened.
+ *
+ * Return: void
+ */
+static void ibmveth_remove_buffer_from_pool_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+ u64 correlator;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ correlator = ((u64)IBMVETH_NUM_BUFF_POOLS << 32) | 0;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = ((u64)0 << 32) | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EINVAL, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ correlator = (u64)0 | 0;
+ pool->skbuff[0] = NULL;
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, false));
+ KUNIT_EXPECT_EQ(test, -EFAULT, ibmveth_remove_buffer_from_pool(adapter, correlator, true));
+
+ flush_work(&adapter->work);
+}
+
+/**
+ * ibmveth_rxq_get_buffer_test - unit test for ibmveth_rxq_get_buffer
+ * @test: pointer to kunit structure
+ *
+ * Tests ibmveth_rxq_get_buffer. ibmveth_rxq_get_buffer also calls WARN_ON for
+ * the NULL returns, so dmesg should be checked to see that these warnings
+ * happened.
+ *
+ * Return: void
+ */
+static void ibmveth_rxq_get_buffer_test(struct kunit *test)
+{
+ struct ibmveth_adapter *adapter = kunit_kzalloc(test, sizeof(*adapter), GFP_KERNEL);
+ struct sk_buff *skb = kunit_kzalloc(test, sizeof(*skb), GFP_KERNEL);
+ struct ibmveth_buff_pool *pool;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ INIT_WORK(&adapter->work, ibmveth_reset_kunit);
+
+ adapter->rx_queue.queue_len = 1;
+ adapter->rx_queue.index = 0;
+ adapter->rx_queue.queue_addr = kunit_kzalloc(test, sizeof(struct ibmveth_rx_q_entry),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, adapter->rx_queue.queue_addr);
+
+ /* Set sane values for buffer pools */
+ for (int i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+ pool_count[i], pool_size[i],
+ pool_active[i]);
+
+ pool = &adapter->rx_buff_pool[0];
+ pool->skbuff = kunit_kcalloc(test, pool->size, sizeof(void *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pool->skbuff);
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)IBMVETH_NUM_BUFF_POOLS << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | adapter->rx_buff_pool[0].size;
+ KUNIT_EXPECT_PTR_EQ(test, NULL, ibmveth_rxq_get_buffer(adapter));
+
+ pool->skbuff[0] = skb;
+ adapter->rx_queue.queue_addr[0].correlator = (u64)0 << 32 | 0;
+ KUNIT_EXPECT_PTR_EQ(test, skb, ibmveth_rxq_get_buffer(adapter));
+
+ flush_work(&adapter->work);
+}
+
+static struct kunit_case ibmveth_test_cases[] = {
+ KUNIT_CASE(ibmveth_remove_buffer_from_pool_test),
+ KUNIT_CASE(ibmveth_rxq_get_buffer_test),
+ {}
+};
+
+static struct kunit_suite ibmveth_test_suite = {
+ .name = "ibmveth-kunit-test",
+ .test_cases = ibmveth_test_cases,
+};
+
+kunit_test_suite(ibmveth_test_suite);
+#endif
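
The assertions above lean on the correlator layout ibmveth uses: the pool number sits in the upper 32 bits, the buffer index in the lower 32. A minimal sketch of that encoding, with hypothetical helper names (the driver and tests open-code the shifts):

	/* Hypothetical helpers illustrating the correlator layout the tests probe. */
	static inline u64 ibmveth_mk_correlator(u32 pool, u32 index)
	{
		return ((u64)pool << 32) | index;
	}

	static inline u32 ibmveth_correlator_pool(u64 correlator)
	{
		return correlator >> 32;		/* upper 32 bits select the pool */
	}

	static inline u32 ibmveth_correlator_index(u64 correlator)
	{
		return correlator & 0xffffffffULL;	/* lower 32 bits index into the pool */
	}

So the -EINVAL cases pass an out-of-range pool (IBMVETH_NUM_BUFF_POOLS) or an out-of-range index (pool->size), and the -EFAULT case passes a valid correlator whose skbuff slot is NULL.
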
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 8468e2c59d7a..b0a2460ec9f9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -134,38 +134,39 @@ struct ibmveth_rx_q {
};
struct ibmveth_adapter {
- struct vio_dev *vdev;
- struct net_device *netdev;
- struct napi_struct napi;
- unsigned int mcastFilterSize;
- void * buffer_list_addr;
- void * filter_list_addr;
- void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
- unsigned int tx_ltb_size;
- dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
- dma_addr_t buffer_list_dma;
- dma_addr_t filter_list_dma;
- struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
- struct ibmveth_rx_q rx_queue;
- int rx_csum;
- int large_send;
- bool is_active_trunk;
-
- u64 fw_ipv6_csum_support;
- u64 fw_ipv4_csum_support;
- u64 fw_large_send_support;
- /* adapter specific stats */
- u64 replenish_task_cycles;
- u64 replenish_no_mem;
- u64 replenish_add_buff_failure;
- u64 replenish_add_buff_success;
- u64 rx_invalid_buffer;
- u64 rx_no_buffer;
- u64 tx_map_failed;
- u64 tx_send_failed;
- u64 tx_large_packets;
- u64 rx_large_packets;
- /* Ethtool settings */
+ struct vio_dev *vdev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct work_struct work;
+ unsigned int mcastFilterSize;
+ void *buffer_list_addr;
+ void *filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
+ dma_addr_t buffer_list_dma;
+ dma_addr_t filter_list_dma;
+ struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
+ struct ibmveth_rx_q rx_queue;
+ int rx_csum;
+ int large_send;
+ bool is_active_trunk;
+
+ u64 fw_ipv6_csum_support;
+ u64 fw_ipv4_csum_support;
+ u64 fw_large_send_support;
+ /* adapter specific stats */
+ u64 replenish_task_cycles;
+ u64 replenish_no_mem;
+ u64 replenish_add_buff_failure;
+ u64 replenish_add_buff_success;
+ u64 rx_invalid_buffer;
+ u64 rx_no_buffer;
+ u64 tx_map_failed;
+ u64 tx_send_failed;
+ u64 tx_large_packets;
+ u64 rx_large_packets;
+ /* Ethtool settings */
u8 duplex;
u32 speed;
};
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1640d2f27833..5a331c1c76cb 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -147,6 +147,8 @@ config IXGBE
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
+ select NET_DEVLINK
+ select PLDMFW
select PHYLIB
help
This driver supports Intel(R) 10GbE PCI Express family of
@@ -367,6 +369,7 @@ config IGC
default n
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba9c19e6994c..952898151565 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -319,7 +319,7 @@ struct e1000_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
unsigned long tx_hwtstamp_start;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8ebcb6a7d608..e0f492a6723f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3574,6 +3574,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
* @adapter: board private structure
* @config: timestamp configuration
+ * @extack: netlink extended ACK for error reporting
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -3587,7 +3588,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* exception of "all V2 events regardless of level 2 or 4".
**/
static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_hw *hw = &adapter->hw;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
@@ -3598,8 +3600,10 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l2 = false;
u32 regval;
- if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) {
+ NL_SET_ERR_MSG(extack, "No HW timestamp support");
return -EINVAL;
+ }
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -3608,6 +3612,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
case HWTSTAMP_TX_ON:
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported TX HW timestamp type");
return -ERANGE;
}
@@ -3681,6 +3686,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
+ NL_SET_ERR_MSG(extack, "Unsupported RX HW timestamp filter");
return -ERANGE;
}
@@ -3693,7 +3699,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
ew32(TSYNCTXCTL, regval);
if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
(regval & E1000_TSYNCTXCTL_ENABLED)) {
- e_err("Timesync Tx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Tx Control register not set as expected");
return -EAGAIN;
}
@@ -3706,7 +3713,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
E1000_TSYNCRXCTL_TYPE_MASK)) !=
(regval & (E1000_TSYNCRXCTL_ENABLED |
E1000_TSYNCRXCTL_TYPE_MASK))) {
- e_err("Timesync Rx Control register not set as expected\n");
+ NL_SET_ERR_MSG(extack,
+ "Timesync Rx Control register not set as expected");
return -EAGAIN;
}
@@ -3901,6 +3909,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
{
struct ptp_clock_info *info = &adapter->ptp_clock_info;
struct e1000_hw *hw = &adapter->hw;
+ struct netlink_ext_ack extack = {};
unsigned long flags;
u32 timinca;
s32 ret_val;
@@ -3932,7 +3941,12 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->systim_lock, flags);
/* restore the previous hwtstamp configuration settings */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ ret_val = e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config,
+ &extack);
+ if (ret_val) {
+ if (extack._msg)
+ e_err("%s\n", extack._msg);
+ }
}
/**
@@ -6079,8 +6093,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd)
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(ifr);
@@ -6140,7 +6153,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
/**
* e1000e_hwtstamp_set - control hardware time stamping
* @netdev: network interface device structure
- * @ifr: interface request
+ * @config: timestamp configuration
+ * @extack: netlink extended ACK for error reporting
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -6153,20 +6167,18 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
int ret_val;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ret_val = e1000e_config_hwtstamp(adapter, &config);
+ ret_val = e1000e_config_hwtstamp(adapter, config, extack);
if (ret_val)
return ret_val;
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -6178,38 +6190,23 @@ static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
* by hardware so notify the caller the requested packets plus
* some others are time stamped.
*/
- config.rx_filter = HWTSTAMP_FILTER_SOME;
+ config->rx_filter = HWTSTAMP_FILTER_SOME;
break;
default:
break;
}
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
-static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *kernel_config)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
- sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
-}
+ *kernel_config = adapter->hwtstamp_config;
-static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return e1000_mii_ioctl(netdev, ifr, cmd);
- case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_set(netdev, ifr);
- case SIOCGHWTSTAMP:
- return e1000e_hwtstamp_get(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
@@ -7346,9 +7343,11 @@ static const struct net_device_ops e1000e_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e1000_netpoll,
#endif
- .ndo_set_features = e1000_set_features,
- .ndo_fix_features = e1000_fix_features,
+ .ndo_set_features = e1000_set_features,
+ .ndo_fix_features = e1000_fix_features,
.ndo_features_check = passthru_features_check,
+ .ndo_hwtstamp_get = e1000e_hwtstamp_get,
+ .ndo_hwtstamp_set = e1000e_hwtstamp_set,
};
/**
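
The e1000e hunks above replace the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctl plumbing with the dedicated ndo_hwtstamp_get/ndo_hwtstamp_set callbacks: the core now performs the copy_from_user()/copy_to_user() and hands the driver a struct kernel_hwtstamp_config plus an extack for error messages. A minimal sketch of the pattern, using hypothetical foo_* names rather than e1000e's:

	struct foo_priv {
		struct kernel_hwtstamp_config hwtstamp_config;
	};

	static int foo_hwtstamp_get(struct net_device *dev,
				    struct kernel_hwtstamp_config *cfg)
	{
		struct foo_priv *priv = netdev_priv(dev);

		*cfg = priv->hwtstamp_config;	/* plain struct copy; no copy_to_user() */
		return 0;
	}

	static int foo_hwtstamp_set(struct net_device *dev,
				    struct kernel_hwtstamp_config *cfg,
				    struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(dev);

		if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
			NL_SET_ERR_MSG(extack, "Unsupported TX timestamp type");
			return -ERANGE;
		}
		/* ... program the hardware here ... */
		priv->hwtstamp_config = *cfg;
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_hwtstamp_get = foo_hwtstamp_get,
		.ndo_hwtstamp_set = foo_hwtstamp_set,
	};

With these ops in place the driver's ioctl handler no longer needs SIOC[GS]HWTSTAMP cases, which is why the dispatcher collapses back into the MII-only e1000_ioctl above.
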
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 370b4bddee44..b11c35e307ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -817,10 +817,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
void i40e_clear_hw(struct i40e_hw *hw)
{
u32 num_queues, base_queue;
- u32 num_pf_int;
- u32 num_vf_int;
+ s32 num_pf_int;
+ s32 num_vf_int;
u32 num_vfs;
- u32 i, j;
+ s32 i;
+ u32 j;
u32 val;
u32 eol = 0x7ff;
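
The i40e signedness change above is easy to misread. A plausible motivation (an assumption; the hunk does not say) is that these counts feed subtractions such as num_pf_int - 2, where unsigned arithmetic wraps instead of going negative:

	/* Illustrative only, not i40e code. */
	u32 num_u = 1;
	s32 num_s = 1;

	for (u32 i = 0; i < num_u - 2; i++)	/* num_u - 2 wraps to 0xffffffff */
		;				/* loop runs ~4 billion times */

	for (s32 i = 0; i < num_s - 2; i++)	/* num_s - 2 == -1, comparison is signed */
		;				/* body never runs */
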
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index fcb199efbea5..4af60e2f37df 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -1339,8 +1339,13 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);
return 0;
}
@@ -1350,19 +1355,24 @@ static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool roce_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!roce_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return ret;
}
@@ -1373,11 +1383,16 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1390,8 +1405,13 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);
return 0;
}
@@ -1401,19 +1421,24 @@ static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool iw_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!iw_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return ret;
}
@@ -1428,7 +1453,7 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index fd083647c14a..ddd0ad68185b 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -193,8 +193,6 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
-#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
-
enum ice_feature {
ICE_F_DSCP,
ICE_F_PHY_RCLK,
@@ -401,7 +399,6 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
- u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
@@ -515,6 +512,7 @@ enum ice_pf_flags {
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
+ ICE_FLAG_LLDP_AQ_FLTR,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -557,7 +555,6 @@ struct ice_pf {
struct devlink_port devlink_port;
/* OS reserved IRQ details */
- struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
struct ice_virt_irq_tracker virt_irq_tracker;
@@ -592,7 +589,6 @@ struct ice_pf {
struct gnss_serial *gnss_serial;
struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
- u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
@@ -625,14 +621,12 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
- u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
- struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
/* count of tc_flower filters specific to channel (aka where filter
@@ -664,6 +658,7 @@ struct ice_pf {
struct ice_dplls dplls;
struct device *hwmon_dev;
struct ice_health health_reporters;
+ struct iidc_rdma_core_dev_info *cdev_info;
u8 num_quanta_prof_used;
};
@@ -1045,4 +1040,62 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
}
extern const struct xdp_metadata_ops ice_xdp_md_ops;
+
+/**
+ * ice_is_dual - Check if given config is multi-NAC
+ * @hw: pointer to HW structure
+ *
+ * Return: true if the device is running in multi-NAC (Network
+ * Acceleration Complex) configuration variant, false otherwise
+ * (always false for non-E825 devices).
+ */
+static inline bool ice_is_dual(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
+}
+
+/**
+ * ice_is_primary - Check if given device belongs to the primary complex
+ * @hw: pointer to HW structure
+ *
+ * Check if given PF/HW is running on primary complex in multi-NAC
+ * configuration.
+ *
+ * Return: true if the device is running on the primary complex,
+ * false otherwise (always true for non-E825 devices).
+ */
+static inline bool ice_is_primary(struct ice_hw *hw)
+{
+ return hw->mac_type != ICE_MAC_GENERIC_3K_E825 ||
+ !ice_is_dual(hw) ||
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
+}
+
+/**
+ * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF
+ * @pf: pointer to PF structure
+ *
+ * Return: true if PF owns primary timer, false otherwise.
+ */
+static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf)
+{
+ return pf->hw.func_caps.ts_func_info.src_tmr_owned &&
+ ice_is_primary(&pf->hw);
+}
+
+/**
+ * ice_get_primary_hw - Get pointer to primary ice_hw structure
+ * @pf: pointer to PF structure
+ *
+ * Return: A pointer to ice_hw structure with access to timesync
+ * register space.
+ */
+static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf)
+{
+ if (!pf->adapter->ctrl_pf)
+ return &pf->hw;
+ else
+ return &pf->adapter->ctrl_pf->hw;
+}
#endif /* _ICE_H_ */
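
These new inline helpers give callers one place to answer the E825 multi-NAC topology questions, and ice_get_primary_hw() supplies the ice_hw that actually maps the shared timesync registers. The later ice_ptp hunks all follow the same shape; a condensed sketch with a hypothetical foo_* wrapper:

	static u32 foo_read_gltsyn(struct ice_pf *pf, u32 reg)
	{
		struct ice_hw *hw = &pf->hw;

		/* GLTSYN_* registers live on the primary complex; redirect there */
		if (!ice_is_primary(hw))
			hw = ice_get_primary_hw(pf);

		return rd32(hw, reg);
	}
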
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 59df31c2c83f..4fedf0181c4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1135,6 +1135,8 @@ int ice_init_hw(struct ice_hw *hw)
}
}
+ hw->lane_num = ice_get_phy_lane_number(hw);
+
return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
@@ -3434,7 +3436,7 @@ int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
msg.msg_addr_low = lower_16_bits(reg_offset);
msg.msg_addr_high = receiver_id;
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, flag);
if (err)
@@ -4082,10 +4084,12 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
continue;
if (hw->pf_id == lport) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ ice_is_dual(hw) && !ice_is_primary(hw))
+ lane += ICE_PORTS_PER_QUAD;
kfree(options);
return lane;
}
-
lport++;
}
@@ -6011,15 +6015,21 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
/**
* ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
* @hw: pointer to HW struct
- * @vsi_num: absolute HW index for VSI
+ * @vsi: VSI to add the filter to
* @add: boolean for if adding or removing a filter
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed
+ * with this HW or VSI, otherwise an error corresponding to
+ * the AQ transaction result.
*/
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
struct ice_aq_desc desc;
+ if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
+ return -EOPNOTSUPP;
+
cmd = &desc.params.lldp_filter_ctrl;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
@@ -6029,7 +6039,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
else
cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
- cmd->vsi_num = cpu_to_le16(vsi_num);
+ cmd->vsi_num = cpu_to_le16(vsi->vsi_num);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 9b00aa0ddf10..64c530b39191 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -290,8 +290,7 @@ int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add);
int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 74418c445cc4..64737fc62306 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1288,7 +1288,7 @@ ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
/* bytes 0 - 63 - IPv4 DSCP2UP LUT */
- for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ for (i = 0; i < DSCP_MAX; i++) {
/* IPv4 mapping */
buf[i] = dcbcfg->dscp_map[i];
/* IPv6 mapping */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a7c510832824..533eb8930aa8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -352,8 +352,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
+ struct iidc_rdma_event *event;
int ret = ICE_DCB_NO_HW_CHG;
- struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -405,7 +405,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
- set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
@@ -740,7 +740,9 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- struct iidc_event *event;
+ struct iidc_rdma_priv_dev_info *privd;
+ struct iidc_rdma_core_dev_info *cdev;
+ struct iidc_rdma_event *event;
u8 tc_map = 0;
int v, ret;
@@ -783,13 +785,17 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- if (!locked) {
+
+ cdev = pf->cdev_info;
+ if (cdev && !locked) {
+ privd = cdev->iidc_priv;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
/* Notify the AUX drivers that TC change is finished */
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -846,7 +852,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- ice_cfg_sw_lldp(pf_vsi, false, true);
+ ice_cfg_sw_rx_lldp(pf, true);
pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
@@ -945,6 +951,37 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
/**
+ * ice_setup_dcb_qos_info - Setup DCB QoS information
+ * @pf: ptr to ice_pf
+ * @qos_info: QoS param instance
+ */
+void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ struct ice_dcbx_cfg *dcbx_cfg;
+ unsigned int i;
+ u32 up2tc;
+
+ if (!pf || !qos_info)
+ return;
+
+ dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+
+ qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
+
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos_info->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < DSCP_MAX; i++)
+ qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
+}
+
+/**
* ice_dcb_is_mib_change_pending - Check if MIB change is pending
* @state: MIB change state
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 800879a88c5e..da9ba814b4e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -31,6 +31,9 @@ void
ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
+ice_setup_dcb_qos_info(struct ice_pf *pf,
+ struct iidc_rdma_qos_params *qos_info);
+void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
/**
@@ -134,5 +137,11 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
+static inline void
+ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ qos_info->num_tc = 1;
+ qos_info->tc_info[0].rel_bw = 100;
+}
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
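
Note what the !CONFIG_DCB stub above buys: consumers of ice_setup_dcb_qos_info() always see a usable single-TC, full-bandwidth default instead of a zeroed struct. Sketch of the caller-visible effect:

	struct iidc_rdma_qos_params qos = {};

	ice_setup_dcb_qos_info(pf, &qos);
	/* with DCB compiled out: qos.num_tc == 1, qos.tc_info[0].rel_bw == 100 */
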
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 6d50b90a7359..a10c1c8d8697 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -754,7 +754,7 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
if (!ice_is_feature_supported(pf, ICE_F_DSCP))
return -EOPNOTSUPP;
- if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ if (app->protocol >= DSCP_MAX) {
netdev_err(netdev, "DSCP value 0x%04X out of range\n",
app->protocol);
return -EINVAL;
@@ -931,7 +931,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
/* if the last DSCP mapping just got deleted, need to switch
* to L2 VLAN QoS mode
*/
- if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) &&
new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
ret = ice_aq_set_pfc_mode(&pf->hw,
ICE_AQC_PFC_VLAN_BASED_PFC,
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index ed21d7f55ac1..6aae03771746 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -29,6 +29,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, false);
netif_addr_lock_bh(netdev);
__dev_uc_unsync(netdev, NULL);
@@ -245,6 +246,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
u64 cd_cmd, dst_vsi;
if (!dst) {
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+
+ if (unlikely(eth->h_proto == htons(ETH_P_LLDP)))
+ return;
cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
@@ -278,6 +283,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, true);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 7c2dc347e4e5..bbf9e6fd315b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1818,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
- ice_cfg_sw_lldp(vsi, false, false);
+ ice_cfg_sw_rx_lldp(vsi->back, false);
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
@@ -3964,11 +3964,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- if (pf->adev) {
+ if (pf->cdev_info && pf->cdev_info->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&pf->cdev_info->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (pf->cdev_info->adev->dev.driver) {
netdev_err(dev, "Cannot change channels when RDMA is active\n");
ret = -EBUSY;
goto adev_unlock;
@@ -3987,7 +3987,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&pf->cdev_info->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 1d118171de37..aceec184e89b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1605,7 +1605,7 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
- const enum ice_fltr_ptype dflt_rules[] = {
+ static const enum ice_fltr_ptype dflt_rules[] = {
ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
};
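
The one-word change above is a classic micro-fix: without static, a const array local to a function is typically rebuilt on the stack on every call, while static const lets the compiler emit it once as read-only data. Illustrative comparison (not ice code):

	void f(void)
	{
		const int on_stack[4] = { 1, 2, 3, 4 };		/* usually re-initialized per call */
		static const int in_rodata[4] = { 1, 2, 3, 4 };	/* emitted once in .rodata */

		(void)on_stack;
		(void)in_rodata;
	}
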
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index bab3e81cad5d..6ab53e430f91 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -9,22 +9,25 @@
static DEFINE_XARRAY_ALLOC1(ice_aux_id);
/**
- * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
- * @pf: pointer to PF struct
+ * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
*
* This function has to be called with a device_lock on the
- * pf->adev.dev to avoid race conditions.
+ * cdev->adev.dev to avoid race conditions.
+ *
+ * Return: pointer to the matched auxiliary driver struct
*/
-static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+static struct iidc_rdma_core_auxiliary_drv *
+ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev)
{
struct auxiliary_device *adev;
- adev = pf->adev;
+ adev = cdev->adev;
if (!adev || !adev->dev.driver)
return NULL;
- return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
- adrv.driver);
+ return container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv, adrv.driver);
}
/**
@@ -32,44 +35,54 @@ static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
* @pf: pointer to PF struct
* @event: event struct
*/
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event)
{
- struct iidc_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_dev_info *cdev;
if (WARN_ON_ONCE(!in_task()))
return;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return;
+
mutex_lock(&pf->adev_mutex);
- if (!pf->adev)
+ if (!cdev->adev)
goto finish;
- device_lock(&pf->adev->dev);
- iadrv = ice_get_auxiliary_drv(pf);
+ device_lock(&cdev->adev->dev);
+ iadrv = ice_get_auxiliary_drv(cdev);
if (iadrv && iadrv->event_handler)
- iadrv->event_handler(pf, event);
- device_unlock(&pf->adev->dev);
+ iadrv->event_handler(cdev, event);
+ device_unlock(&cdev->adev->dev);
finish:
mutex_unlock(&pf->adev_mutex);
}
/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be allocated
+ *
+ * Return: Zero on success or error code encountered
*/
-int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
struct ice_vsi *vsi;
struct device *dev;
+ struct ice_pf *pf;
u32 qset_teid;
u16 qs_handle;
int status;
int i;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
dev = ice_pf_to_dev(pf);
if (!ice_is_rdma_ena(pf))
@@ -100,7 +113,6 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev_err(dev, "Failed VSI RDMA Qset enable\n");
return status;
}
- vsi->qset_handle[qset->tc] = qset->qs_handle;
qset->teid = qset_teid;
return 0;
@@ -109,18 +121,23 @@ EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
/**
* ice_del_rdma_qset - Delete leaf node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be freed
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
u32 teid;
u16 q_id;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, qset->vport_id);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
@@ -130,36 +147,36 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
q_id = qset->qs_handle;
teid = qset->teid;
- vsi->qset_handle[qset->tc] = 0;
-
return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
/**
* ice_rdma_request_reset - accept request from RDMA to perform a reset
- * @pf: struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @reset_type: type of reset
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type)
{
enum ice_reset_req reset;
+ struct ice_pf *pf;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
+
switch (reset_type) {
- case IIDC_PFR:
+ case IIDC_FUNC_RESET:
reset = ICE_RESET_PFR;
break;
- case IIDC_CORER:
+ case IIDC_DEV_RESET:
reset = ICE_RESET_CORER;
break;
- case IIDC_GLOBR:
- reset = ICE_RESET_GLOBR;
- break;
default:
- dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
return -EINVAL;
}
@@ -169,18 +186,23 @@ EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
/**
* ice_rdma_update_vsi_filter - update main VSI filters for RDMA
- * @pf: pointer to struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @vsi_id: VSI HW idx to update filter on
* @enable: bool whether to enable or disable filters
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev,
+ u16 vsi_id, bool enable)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
int status;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, vsi_id);
if (!vsi)
return -EINVAL;
@@ -201,37 +223,23 @@ int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
/**
- * ice_get_qos_params - parse QoS params for RDMA consumption
- * @pf: pointer to PF struct
- * @qos: set of QoS values
+ * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be removed
+ *
+ * Return: Zero on success, error code on failure
*/
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- struct ice_dcbx_cfg *dcbx_cfg;
- unsigned int i;
- u32 up2tc;
-
- dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
-
- qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
-
- qos->pfc_mode = dcbx_cfg->pfc_mode;
- if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
- for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
- qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
-}
-EXPORT_SYMBOL_GPL(ice_get_qos_params);
+ struct msi_map map;
+ struct ice_pf *pf;
-int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
-{
- struct msi_map map = ice_alloc_irq(pf, true);
+ if (WARN_ON(!cdev))
+ return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
+ map = ice_alloc_irq(pf, true);
if (map.index < 0)
return -ENOMEM;
@@ -244,12 +252,19 @@ EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
- * @pf: board private structure to initialize
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @entry: MSI-X entry to be removed
*/
-void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
struct msi_map map;
+ struct ice_pf *pf;
+
+ if (WARN_ON(!cdev || !entry))
+ return;
+
+ pf = pci_get_drvdata(cdev->pdev);
map.index = entry->entry;
map.virq = entry->vector;
@@ -263,19 +278,23 @@ EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
*/
static void ice_adev_release(struct device *dev)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
- iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev,
+ adev.dev);
kfree(iadev);
}
/**
* ice_plug_aux_dev - allocate and register AUX device
* @pf: pointer to pf struct
+ *
+ * Return: Zero on success, error code on failure
*/
int ice_plug_aux_dev(struct ice_pf *pf)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ struct iidc_rdma_core_dev_info *cdev;
struct auxiliary_device *adev;
int ret;
@@ -285,17 +304,22 @@ int ice_plug_aux_dev(struct ice_pf *pf)
if (!ice_is_rdma_ena(pf))
return 0;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
adev = &iadev->adev;
- iadev->pf = pf;
+ iadev->cdev_info = cdev;
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
+ adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -310,7 +334,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
}
mutex_lock(&pf->adev_mutex);
- pf->adev = adev;
+ cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
return 0;
@@ -324,8 +348,8 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
struct auxiliary_device *adev;
mutex_lock(&pf->adev_mutex);
- adev = pf->adev;
- pf->adev = NULL;
+ adev = pf->cdev_info->adev;
+ pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
if (adev) {
@@ -340,7 +364,9 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
*/
int ice_init_rdma(struct ice_pf *pf)
{
+ struct iidc_rdma_priv_dev_info *privd;
struct device *dev = &pf->pdev->dev;
+ struct iidc_rdma_core_dev_info *cdev;
int ret;
if (!ice_is_rdma_ena(pf)) {
@@ -348,22 +374,50 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
}
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ pf->cdev_info = cdev;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ ret = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ privd->pf_id = pf->hw.pf_id;
ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
GFP_KERNEL);
if (ret) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_alloc_xa;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->iidc_priv = privd;
+ privd->netdev = pf->vsi[0]->netdev;
+
+ privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
+ cdev->pdev = pf->pdev;
+ privd->vport_id = pf->vsi[0]->vsi_num;
+
+ pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
ret = ice_plug_aux_dev(pf);
if (ret)
goto err_plug_aux_dev;
return 0;
err_plug_aux_dev:
- pf->adev = NULL;
+ pf->cdev_info->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
+err_alloc_xa:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev);
+ pf->cdev_info = NULL;
+
return ret;
}
@@ -378,4 +432,7 @@ void ice_deinit_rdma(struct ice_pf *pf)
ice_unplug_aux_dev(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
+ kfree(pf->cdev_info->iidc_priv);
+ kfree(pf->cdev_info);
+ pf->cdev_info = NULL;
}
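
Every converted entry point in ice_idc.c now starts the same way: validate the cdev handle, then recover the PF that was stashed in the PCI device at probe time. A sketch of that recurring prologue as a hypothetical helper (the driver open-codes it in each function):

	static struct ice_pf *ice_pf_from_cdev(struct iidc_rdma_core_dev_info *cdev)
	{
		if (WARN_ON(!cdev))
			return NULL;

		/* probe stored the PF via pci_set_drvdata(); cdev->pdev points back */
		return pci_get_drvdata(cdev->pdev);
	}
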
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index 4b0c86757df9..17dbfcfb6a2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -4,10 +4,11 @@
#ifndef _ICE_IDC_INT_H_
#define _ICE_IDC_INT_H_
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
struct ice_pf;
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event);
#endif /* !_ICE_IDC_INT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 22371011c249..2410aee59fb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
if (!primary_lag) {
lag->primary = true;
+ if (!ice_is_switchdev_running(lag->pf))
+ return;
+
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
} else {
u16 swid;
+ if (!ice_is_switchdev_running(primary_lag->pf))
+ return;
+
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0bcf9d127ac9..03bb16191237 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2065,12 +2065,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
}
/**
- * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
+ * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
* @create: bool to determine create or remove Rule
+ *
+ * Adding an ethtype Tx rule to the uplink VSI results in it being applied
+ * to the whole port, so LLDP transmission for VFs will be blocked too.
*/
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
enum ice_sw_fwd_act_type act);
@@ -2085,19 +2088,59 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
} else {
- if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
- status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
- create);
- } else {
+ if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
ICE_FWD_TO_VSI);
+ if (!status || !create)
+ goto report;
+
+ dev_info(dev,
+ "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
+ vsi->vsi_num, status);
}
+
+ status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
+ if (!status)
+ set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
+
}
+report:
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
- create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
+ dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
+ create ? "add" : "remove", tx ? "Tx" : "Rx",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
+ * @pf: the PF being configured
+ * @enable: enable or disable
+ *
+ * Configure switch rules to enable/disable LLDP handling by software
+ * across the PF.
+ */
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
+{
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ vsi = ice_get_main_vsi(pf);
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+
+ if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ return;
+
+ ice_for_each_vf(pf, bkt, vf) {
+ vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ continue;
+
+ if (ice_vf_is_lldp_ena(vf))
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+ }
}
/**
@@ -2528,7 +2571,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
+ ice_vsi_cfg_sw_lldp(vsi, true, true);
}
if (!vsi->agg_node)
@@ -2825,9 +2868,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
+ if (!ice_is_safe_mode(pf) &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) &&
+ (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_lldp_ena(vsi->vf))))
+ ice_vsi_cfg_sw_lldp(vsi, false, false);
ice_vsi_decfg(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index b4c9cb28a016..654516c5fc3e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -29,7 +29,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable);
int ice_set_link(struct ice_vsi *vsi, bool ena);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d390157b59fe..20d3baf955e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2401,11 +2401,11 @@ static void ice_service_task(struct work_struct *work)
}
if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
/* report the entire OICR value to AUX driver */
swap(event->reg, pf->oicr_err_reg);
ice_send_event_to_aux(pf, event);
@@ -2424,11 +2424,11 @@ static void ice_service_task(struct work_struct *work)
ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -8330,11 +8330,16 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @np: net device to configure
* @filter_dev: device on which filter is added
* @cls_flower: offload data
+ * @ingress: true if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower filter was successfully added or deleted,
+ * negative error code otherwise.
*/
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
struct net_device *filter_dev,
- struct flow_cls_offload *cls_flower)
+ struct flow_cls_offload *cls_flower,
+ bool ingress)
{
struct ice_vsi *vsi = np->vsi;
@@ -8343,7 +8348,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(vsi, cls_flower);
default:
@@ -8352,20 +8357,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
}
/**
- * ice_setup_tc_block_cb - callback handler registered for TC block
+ * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
* @type: TC SETUP type
* @type_data: TC flower offload data that contains user input
* @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
*/
static int
-ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
struct ice_netdev_priv *np = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, np->vsi->netdev,
- type_data);
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb_egress - callback handler for egress TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
+ */
+static int
+ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -9310,27 +9341,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ enum flow_block_binder_type binder_type;
+ struct iidc_rdma_core_dev_info *cdev;
struct ice_pf *pf = np->vsi->back;
+ flow_setup_cb_t *flower_handler;
bool locked = false;
int err;
switch (type) {
case TC_SETUP_BLOCK:
+ binder_type =
+ ((struct flow_block_offload *)type_data)->binder_type;
+
+ switch (binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ flower_handler = ice_setup_tc_block_cb_ingress;
+ break;
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ flower_handler = ice_setup_tc_block_cb_egress;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
return flow_block_cb_setup_simple(type_data,
&ice_block_cb_list,
- ice_setup_tc_block_cb,
- np, np, true);
+ flower_handler,
+ np, np, false);
case TC_SETUP_QDISC_MQPRIO:
if (ice_is_eswitch_mode_switchdev(pf)) {
netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
return -EOPNOTSUPP;
}
- if (pf->adev) {
+ cdev = pf->cdev_info;
+ if (cdev && cdev->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&cdev->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (cdev->adev->dev.driver) {
netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
err = -EBUSY;
goto adev_unlock;
@@ -9344,7 +9393,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&cdev->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return err;
@@ -9380,7 +9429,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, priv->netdev,
(struct flow_cls_offload *)
- type_data);
+ type_data, false);
default:
return -EOPNOTSUPP;
}
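
ice_setup_tc above now dispatches on the flow block's binder type so ingress and egress binds get direction-specific classifier callbacks. Note the final argument of flow_block_cb_setup_simple() flipping from true to false: that parameter means "ingress only", and egress binds are now accepted. Condensed shape of the dispatch:

	struct flow_block_offload *f = type_data;
	flow_setup_cb_t *cb;

	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		cb = ice_setup_tc_block_cb_ingress;
		break;
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		cb = ice_setup_tc_block_cb_egress;
		break;
	default:
		return -EOPNOTSUPP;
	}
	/* last argument false: egress binds are no longer rejected */
	return flow_block_cb_setup_simple(f, &ice_block_cb_list, cb, np, np, false);
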
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 1fd1ae03eb90..b79a148ed0f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -305,6 +305,9 @@ u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
u32 hi, lo, lo2;
u8 tmr_idx;
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
tmr_idx = ice_get_ptp_src_clock_index(hw);
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
@@ -1624,14 +1627,6 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
int pin_desc_idx;
u8 tmr_idx;
- /* Reject requests with unsupported flags */
-
- if (rq->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
chan = rq->index;
@@ -1802,9 +1797,6 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
struct ice_hw *hw = &pf->hw;
int pin_desc_idx;
- if (rq->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
if (pin_desc_idx < 0)
return -EIO;
@@ -2737,6 +2729,11 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
+ info->supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ info->supported_perout_flags = PTP_PEROUT_PHASE;
+
switch (pf->hw.mac_type) {
case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
@@ -2986,6 +2983,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
+ * @pf: Board private structure
+ * @rebuild: rebuild if true, prepare if false
+ * @reset_type: the reset type being performed
+ */
+static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
+ enum ice_reset_req reset_type)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->adapter->ports.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_node);
+ struct ice_pf *peer_pf = ptp_port_to_pf(port);
+
+ if (!ice_is_primary(&peer_pf->hw)) {
+ if (rebuild)
+ ice_ptp_rebuild(peer_pf, reset_type);
+ else
+ ice_ptp_prepare_for_reset(peer_pf, reset_type);
+ }
+ }
+}
+
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
* @reset_type: the reset type being performed
@@ -2993,6 +3016,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
u8 src_tmr;
if (ptp->state != ICE_PTP_READY)
@@ -3008,6 +3032,9 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (reset_type == ICE_RESET_PFR)
return;
+ if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
+
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
/* Disable periodic outputs */
@@ -3129,13 +3156,6 @@ err:
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
-static bool ice_is_primary(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) :
- true;
-}
-
static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
@@ -3355,17 +3375,16 @@ void ice_ptp_init(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
- int lane_num, err;
+ int err;
ptp->state = ICE_PTP_INITIALIZING;
- lane_num = ice_get_phy_lane_number(hw);
- if (lane_num < 0) {
- err = lane_num;
+ if (hw->lane_num < 0) {
+ err = hw->lane_num;
goto err_exit;
}
+ ptp->port.port_num = hw->lane_num;
- ptp->port.port_num = (u8)lane_num;
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 89bb8461284a..ccac84eb34c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -240,7 +240,7 @@ static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg = {
.opcode = ice_sbq_msg_rd,
- .dest_dev = cgu,
+ .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr
};
int err;
@@ -272,7 +272,7 @@ static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
.opcode = ice_sbq_msg_wr,
- .dest_dev = cgu,
+ .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
.data = val
};
@@ -874,8 +874,12 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
*/
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -891,6 +895,9 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
@@ -919,16 +926,24 @@ static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
*
* Return: destination sideband queue PHY device.
*/
-static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
- u8 port)
+static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
{
- /* On a single complex E825, PHY 0 is always destination device phy_0
+ u8 curr_phy, tgt_phy;
+
+ tgt_phy = port >= hw->ptp.ports_per_phy;
+ curr_phy = hw->lane_num >= hw->ptp.ports_per_phy;
+	/* In the driver, lanes 4..7 are in fact lanes 0..3 on the second PHY.
+	 * On a single complex E825C, PHY 0 is always destination device phy_0
+	 * and PHY 1 is phy_0_peer.
+	 * On a dual complex E825C, device phy_0 points to the PHY on the
+	 * current complex and phy_0_peer to the PHY on the other complex.
*/
- if (port >= hw->ptp.ports_per_phy)
- return eth56g_phy_1;
+ if ((!ice_is_dual(hw) && tgt_phy == 1) ||
+ (ice_is_dual(hw) && tgt_phy != curr_phy))
+ return ice_sbq_dev_phy_0_peer;
else
- return eth56g_phy_0;
+ return ice_sbq_dev_phy_0;
}
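
The mapping implemented above reduces to a small truth table over the
(dual-complex, target PHY, requesting PHY) triple. A self-contained sketch of
the same selection with the hw state reduced to plain parameters (mock code,
not part of the driver):

#include <stdbool.h>
#include <stdio.h>

enum dest { DEST_PHY_0, DEST_PHY_0_PEER };

/* Mirror of the selection logic above, minus the ice_hw plumbing. */
static enum dest dest_dev(bool is_dual, int ports_per_phy,
			  int lane_num, int port)
{
	bool tgt_phy = port >= ports_per_phy;
	bool curr_phy = lane_num >= ports_per_phy;

	if ((!is_dual && tgt_phy) || (is_dual && tgt_phy != curr_phy))
		return DEST_PHY_0_PEER;
	return DEST_PHY_0;
}

int main(void)
{
	/* Single complex: any port on the second PHY (4..7) is the peer. */
	printf("single, lane 0 -> port 5: %s\n",
	       dest_dev(false, 4, 0, 5) == DEST_PHY_0_PEER ? "peer" : "phy_0");
	/* Dual complex: "peer" means the other complex, so a target on the
	 * requester's own complex resolves to phy_0 even for lanes 4..7.
	 */
	printf("dual, lane 5 -> port 5: %s\n",
	       dest_dev(true, 4, 5, 5) == DEST_PHY_0_PEER ? "peer" : "phy_0");
	return 0;
}
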
/**
@@ -2417,6 +2432,7 @@ int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
u64 *phy_time, u64 *phc_time)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u64 tx_time, rx_time;
u32 zo, lo;
u8 tmr_idx;
@@ -2436,8 +2452,13 @@ static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
- zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
- lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ if (ice_is_primary(hw)) {
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ } else {
+ zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx));
+ }
*phc_time = (u64)lo << 32 | zo;
/* Read the captured PHY time from the PHY shadow registers */
@@ -2574,6 +2595,7 @@ int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
*/
int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 lo, hi;
u64 incval;
u8 tmr_idx;
@@ -2599,8 +2621,13 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
if (err)
return err;
- lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
- hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ if (ice_is_primary(hw)) {
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ } else {
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx));
+ }
incval = (u64)hi << 32 | lo;
err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
@@ -2631,25 +2658,6 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
}
/**
- * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access
- * @hw: pointer to HW struct
- * @enable: Enable or disable access
- *
- * Enable sideband devices (PHY and others) access.
- */
-static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
-{
- u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
-
- if (enable)
- val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
- else
- val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
-
- wr32(hw, PF_SB_REM_DEV_CTL, val);
-}
-
-/**
* ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization
* @hw: pointer to HW struct
*
@@ -2659,8 +2667,6 @@ static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
*/
static int ice_ptp_init_phc_e825(struct ice_hw *hw)
{
- ice_sb_access_ena_eth56g(hw, true);
-
/* Initialize the Clock Generation Unit */
return ice_init_cgu_e82x(hw);
}
@@ -2747,8 +2753,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
-
- ice_sb_access_ena_eth56g(hw, true);
}
/* E822 family functions
@@ -2781,7 +2785,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- msg->dest_dev = rmn_0;
+ msg->dest_dev = ice_sbq_dev_phy_0;
}
/**
@@ -3104,7 +3108,7 @@ static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
- msg->dest_dev = rmn_0;
+ msg->dest_dev = ice_sbq_dev_phy_0;
if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
@@ -4823,7 +4827,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
@@ -4853,7 +4857,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_wr;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
msg.data = val;
err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index e5925ccc2613..83f20fa7ace7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -444,11 +444,6 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
}
}
-static inline bool ice_is_dual(struct ice_hw *hw)
-{
- return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
-}
-
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index fb7a1b9a4313..cb08746556a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -219,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
{
switch (flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower,
+ true);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(repr->src_vsi, flower);
default:
@@ -336,6 +337,7 @@ void ice_repr_destroy(struct ice_repr *repr)
static void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
@@ -417,6 +419,10 @@ static int ice_repr_add_vf(struct ice_repr *repr)
if (err)
goto err_netdev;
+ err = ice_drop_vf_tx_lldp(repr->src_vsi, true);
+ if (err)
+ goto err_drop_lldp;
+
err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
if (err)
goto err_cfg_vsi;
@@ -429,6 +435,8 @@ static int ice_repr_add_vf(struct ice_repr *repr)
return 0;
err_cfg_vsi:
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
+err_drop_lldp:
unregister_netdev(repr->netdev);
err_netdev:
ice_devlink_destroy_vf_port(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 3b0054faf70c..183dd5457d6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -46,13 +46,10 @@ struct ice_sbq_evt_desc {
u8 data[24];
};
-enum ice_sbq_msg_dev {
- eth56g_phy_0 = 0x02,
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06,
- eth56g_phy_1 = 0x0D,
+enum ice_sbq_dev_id {
+ ice_sbq_dev_phy_0 = 0x02,
+ ice_sbq_dev_cgu = 0x06,
+ ice_sbq_dev_phy_0_peer = 0x0D,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index f1648cf103b7..0e4dc1a5cff0 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -63,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
if (vf->lan_vsi_idx != ICE_NO_VSI) {
ice_vf_vsi_release(vf);
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
}
last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
@@ -1402,6 +1403,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
mutex_lock(&vf->cfg_lock);
+ while (!trusted && vf->num_mac_lldp)
+ ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false);
+
vf->trusted = trusted;
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 4a91e0aaf0a5..9d9a7edd3618 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3146,7 +3146,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -5978,7 +5978,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index ea39b999a0d0..fb9ea7f8ef44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -12,14 +12,11 @@
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
- * @headers: Pointer to TC flower filter header structure
* @fltr: Pointer to outer TC filter structure
*
- * Determine lookup count based on TC flower input for switch filter.
+ * Return: lookup count based on TC flower input for a switch filter.
*/
-static int
-ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
- struct ice_tc_flower_fltr *fltr)
+static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
{
int lkups_cnt = 1; /* 0th lookup is metadata */
@@ -684,26 +681,26 @@ static int ice_tc_setup_action(struct net_device *filter_dev,
fltr->action.fltr_act = action;
if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
+ ice_tc_is_dev_uplink(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(filter_dev);
fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -716,13 +713,11 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
{
fltr->action.fltr_act = ICE_DROP_PACKET;
- if (ice_is_port_repr_netdev(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
+ if (!ice_tc_is_dev_uplink(filter_dev) &&
+ !(ice_is_port_repr_netdev(filter_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -767,10 +762,157 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
return 0;
}
+static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP);
+}
+
+static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ if (!ice_is_switchdev_running(vsi->back))
+ return false;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET &&
+ ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+}
+
+static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == uplink;
+}
+
+static struct ice_tc_flower_fltr *
+ice_find_pf_tx_lldp_fltr(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ return fltr;
+
+ return NULL;
+}
+
+static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ ice_for_each_vf(pf, bkt, vf)
+ if (vf->lldp_tx_ena)
+ return true;
+
+ return false;
+}
+
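+/**
+ * ice_pass_vf_tx_lldp - let the VF transmit LLDP by removing its Tx drop rule
+ * @vsi: VF's VSI carrying the LLDP Tx drop rule
+ * @deinit: if true, skip the PF-drop-rule and single-active-VF checks
+ *
+ * Remove the LLDP drop rule from the VF's VSI so its LLDP frames reach the
+ * wire. Unless @deinit is set, this requires the PF Tx LLDP drop rule to be
+ * installed and no other VF to have LLDP Tx enabled.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */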
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit)
+{
+ struct ice_rule_query_data remove_entry = {
+ .rid = vsi->vf->lldp_recipe_id,
+ .rule_id = vsi->vf->lldp_rule_id,
+ .vsi_handle = vsi->idx,
+ };
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (vsi->vf->lldp_tx_ena)
+ return 0;
+
+ if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
+ return -EINVAL;
+
+ if (!deinit && ice_any_vf_lldp_tx_ena(pf))
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);
+ if (!err)
+ vsi->vf->lldp_tx_ena = true;
+
+ return err;
+}
+
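+/**
+ * ice_drop_vf_tx_lldp - install an LLDP Tx drop rule on the VF's VSI
+ * @vsi: VF's VSI to install the rule on
+ * @init: if true, install the rule unconditionally (initial setup)
+ *
+ * Add an advanced switch rule dropping LLDP frames transmitted by the VF and
+ * mark the VF's LLDP Tx as disabled.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */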
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init)
+{
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_rule_info rinfo = {
+ .priority = 7,
+ .src_vsi = vsi->idx,
+ .sw_act = {
+ .src = vsi->idx,
+ .flag = ICE_FLTR_TX,
+ .fltr_act = ICE_DROP_PACKET,
+ .vsi_handle = vsi->idx,
+ },
+ .flags_info.act_valid = true,
+ };
+ struct ice_adv_lkup_elem list[3];
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (!init && !vsi->vf->lldp_tx_ena)
+ return 0;
+
+ memset(list, 0, sizeof(list));
+ ice_rule_add_direction_metadata(&list[0]);
+ ice_rule_add_src_vsi_metadata(&list[1]);
+ list[2].type = ICE_ETYPE_OL;
+ list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP);
+ list[2].m_u.ethertype.ethtype_id = htons(0xFFFF);
+
+ err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo,
+ &rule_added);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to add an LLDP rule to VSI 0x%X: %d\n",
+ vsi->idx, err);
+ } else {
+ vsi->vf->lldp_recipe_id = rule_added.rid;
+ vsi->vf->lldp_rule_id = rule_added.rule_id;
+ vsi->vf->lldp_tx_ena = false;
+ }
+
+ return err;
+}
+
+static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) {
+ if (!ice_is_fltr_vf_tx_lldp(fltr))
+ continue;
+ ice_pass_vf_tx_lldp(fltr->src_vsi, true);
+ break;
+ }
+}
+
+static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf)
+{
+ int i;
+
+ /* Make the VF LLDP fwd to uplink rule dormant */
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vf_vsi = pf->vsi[i];
+
+ if (vf_vsi && vf_vsi->type == ICE_VSI_VF)
+ ice_drop_vf_tx_lldp(vf_vsi, false);
+ }
+}
+
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data rule_added;
struct ice_hw *hw = &vsi->back->hw;
@@ -785,7 +927,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
return -EOPNOTSUPP;
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_pass_vf_tx_lldp(vsi, false);
+
+ lkups_cnt = ice_tc_count_lkups(flags, fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -814,6 +959,11 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) {
+ /* PF to Uplink */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
/* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
@@ -846,11 +996,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
goto exit;
}
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_add_pf_lldp_drop_rule(vsi);
+
/* store the output params, which are needed later for removing
* advanced switch filter
*/
@@ -985,7 +1141,6 @@ static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_tc_flower_fltr *tc_fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
struct ice_adv_rule_info rule_info = {0};
struct ice_rule_query_data rule_added;
struct ice_adv_lkup_elem *list;
@@ -1021,7 +1176,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return PTR_ERR(dest_vsi);
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -1056,8 +1211,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
case ICE_DROP_PACKET:
- rule_info.sw_act.flag |= ICE_FLTR_RX;
- rule_info.sw_act.src = hw->pf_id;
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ }
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
@@ -1071,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
"Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+				   "Unable to add filter: insufficient space available");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
@@ -1463,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
* @filter_dev: Pointer to device on which filter is being added
* @f: Pointer to struct flow_cls_offload
* @fltr: Pointer to filter structure
+ * @ingress: true if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was parsed successfully, -EINVAL if the flower
+ * cannot be parsed, -EOPNOTSUPP if such a filter cannot be configured
+ * for the given VSI.
*/
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr *fltr)
+ struct ice_tc_flower_fltr *fltr, bool ingress)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -1551,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
+ if (!ingress) {
+ bool switchdev =
+ ice_is_eswitch_mode_switchdev(vsi->back);
+
+ if (switchdev != (n_proto_key == ETH_P_LLDP)) {
+ NL_SET_ERR_MSG_FMT_MOD(fltr->extack,
+ "%sLLDP filtering is not supported on egress in %s mode",
+ switchdev ? "Non-" : "",
+ switchdev ? "switchdev" :
+ "legacy");
+ return -EOPNOTSUPP;
+ }
+ }
+
headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
headers->l3_key.ip_proto = match.key->ip_proto;
@@ -1726,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
}
+
+ /* Ingress filter on representor results in an egress filter in HW
+ * and vice versa
+ */
+ ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
+ fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
+ ICE_ESWITCH_FLTR_EGRESS;
+
return 0;
}
@@ -1939,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_pf *pf = vsi->back;
int err;
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_del_pf_lldp_drop_rule(pf);
+
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_drop_vf_tx_lldp(vsi, false);
+
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
rule_rem.vsi_handle = fltr->dest_vsi_handle;
@@ -1975,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
* @vsi: Pointer to VSI
* @f: Pointer to flower offload structure
* @__fltr: Pointer to struct ice_tc_flower_fltr
+ * @ingress: true if the rule is added to an ingress block
*
* This function parses TC-flower input fields, parses action,
* and adds a filter.
+ *
+ * Return: 0 if the filter was successfully added,
+ * negative error code otherwise.
*/
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr **__fltr)
+ struct ice_tc_flower_fltr **__fltr, bool ingress)
{
struct ice_tc_flower_fltr *fltr;
int err;
@@ -1999,7 +2200,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
fltr->src_vsi = vsi;
INIT_HLIST_NODE(&fltr->tc_flower_node);
- err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress);
if (err < 0)
goto err;
@@ -2042,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
* @netdev: Pointer to filter device
* @vsi: Pointer to VSI
* @cls_flower: Pointer to flower offload structure
+ * @ingress: true if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added,
+ * negative error code otherwise.
*/
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower)
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress)
{
struct netlink_ext_ack *extack = cls_flower->common.extack;
struct net_device *vsi_netdev = vsi->netdev;
@@ -2080,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
}
/* prep and add TC-flower filter in HW */
- err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index d84f153517ec..8a3ab2f22af9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -211,13 +211,14 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
}
struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue);
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower);
-int
-ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress);
+int ice_del_cls_flower(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init);
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit);
static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 1e4f6f6ee449..0e5107fe62ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2440,19 +2440,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
eth = (struct ethhdr *)skb_mac_header(skb);
- if (unlikely((skb->priority == TC_PRIO_CONTROL ||
- eth->h_proto == htons(ETH_P_LLDP)) &&
- vsi->type == ICE_VSI_PF &&
- vsi->port_info->qos_cfg.is_sw_lldp))
- offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
- ICE_TX_CTX_DESC_SWTCH_UPLINK <<
- ICE_TXD_CTX_QW1_CMD_S);
- ice_tstamp(tx_ring, skb, first, &offload);
if ((ice_is_switchdev_running(vsi->back) ||
ice_lag_is_switchdev_running(vsi->back)) &&
vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
+ else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->qos_cfg.is_sw_lldp))
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ ICE_TX_CTX_DESC_SWTCH_UPLINK <<
+ ICE_TXD_CTX_QW1_CMD_S);
+
+ ice_tstamp(tx_ring, skb, first, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 0aab21113cc4..3d68f465952d 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -19,6 +19,7 @@
#include "ice_vlan_mode.h"
#include "ice_fwlog.h"
#include <linux/wait.h>
+#include <net/dscp.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -695,7 +696,6 @@ struct ice_dcb_app_priority_table {
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 64
-#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -718,9 +718,9 @@ struct ice_dcbx_cfg {
u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
/* when DSCP mapping defined by user set its bit to 1 */
- DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ DECLARE_BITMAP(dscp_mapped, DSCP_MAX);
/* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
- u8 dscp_map[ICE_DSCP_NUM_VAL];
+ u8 dscp_map[DSCP_MAX];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -970,6 +970,7 @@ struct ice_hw {
u8 intrl_gran;
struct ice_ptp_hw ptp;
+ s8 lane_num;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 815ad0bfe832..48cd533e93b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
vsi->num_vlan = 0;
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
@@ -1401,3 +1402,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
rcu_read_unlock();
return ctrl_vsi;
}
+
+/**
+ * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
+ * @vf: a VF to add the address to
+ * @vsi: the corresponding VSI
+ * @incr: true if an LLDP address is being added, false if it is removed
+ */
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr)
+{
+	bool lldp_by_fw, was_ena, is_ena;
+
+	if (WARN_ON(!vsi)) {
+		vf->num_mac_lldp = 0;
+		return;
+	}
+
+	lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
+	was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+
+	vf->num_mac_lldp += incr ? 1 : -1;
+ is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+
+ if (was_ena != is_ena)
+ ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 799b2c1f1184..482f4285fd35 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -124,6 +124,7 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+ u8 lldp_tx_ena:1;
u32 ptp_caps;
@@ -134,6 +135,7 @@ struct ice_vf {
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
+ u16 num_mac_lldp;
u16 num_vf_qs; /* num of queue configured per VF */
u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
@@ -149,6 +151,9 @@ struct ice_vf {
/* devlink port data */
struct devlink_port devlink_port;
+ u16 lldp_recipe_id;
+ u16 lldp_rule_id;
+
u16 num_msix; /* num of MSI-X configured on this VF */
struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
@@ -180,6 +185,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
return vf->port_vlan_info.tpid;
}
+static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
+{
+ return vf->num_mac_lldp && vf->trusted;
+}
+
/* VF Hash Table access functions
*
* These functions provide abstraction for interacting with the VF hash table.
@@ -245,6 +255,8 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int ice_reset_vf(struct ice_vf *vf, u32 flags);
void ice_reset_all_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr);
#else /* CONFIG_PCI_IOV */
static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 7c3006eb68dd..eeeb9968e477 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2266,6 +2266,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
}
/**
+ * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
+ * @mac: address to check
+ *
+ * Return: true if the address is one of the three LLDP multicast addresses
+ * (01-80-C2-00-00-00, 01-80-C2-00-00-03 or 01-80-C2-00-00-0E),
+ * false otherwise.
+ */
+static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
+{
+ const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
+
+ if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
+ return false;
+
+ return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
+}
+
+/**
+ * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
+ * @vf: a VF to add the address to
+ * @mac: address to check
+ *
+ * Return: true if the VF is allowed to add such MAC address, false otherwise.
+ */
+static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+
+ if (is_unicast_ether_addr(mac) &&
+ !ice_can_vf_change_mac((struct ice_vf *)vf)) {
+ dev_err(dev,
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return false;
+ }
+
+ if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
+ dev_warn(dev,
+ "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
+ vf->vf_id);
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
@@ -2283,10 +2328,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
- if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
- dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ if (!ice_vc_can_add_mac(vf, mac_addr))
return -EPERM;
- }
ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (ret == -EEXIST) {
@@ -2301,6 +2344,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return ret;
} else {
vf->num_mac++;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, true);
}
ice_vfhw_mac_add(vf, vc_ether_addr);
@@ -2395,6 +2440,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
ice_vfhw_mac_del(vf, vc_ether_addr);
vf->num_mac--;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, false);
return 0;
}
@@ -4275,7 +4322,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
}
ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
break;
}
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
index 1addd663acad..2c359a8551c7 100644
--- a/drivers/net/ethernet/intel/idpf/Kconfig
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -4,6 +4,7 @@
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
select LIBETH
help
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 2ce01a0b5898..83ac5e296382 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -17,3 +17,6 @@ idpf-y := \
idpf_vf_dev.o
idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
+
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o
+idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index aef0e9775a33..1e812c3f62f9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -143,6 +143,7 @@ enum idpf_vport_state {
* @vport_id: Vport identifier
* @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
+ * @max_tx_hdr_size: Max header length hardware can support
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -153,6 +154,7 @@ struct idpf_netdev_priv {
u32 vport_id;
u32 link_speed_mbps;
u16 vport_idx;
+ u16 max_tx_hdr_size;
enum idpf_vport_state state;
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
@@ -189,6 +191,7 @@ struct idpf_vport_max_q {
* @mb_intr_reg_init: Mailbox interrupt register initialization
* @reset_reg_init: Reset register initialization
* @trigger_reset: Trigger a reset to occur
+ * @ptp_reg_init: PTP register initialization
*/
struct idpf_reg_ops {
void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
@@ -197,6 +200,7 @@ struct idpf_reg_ops {
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
enum idpf_flags trig_cause);
+ void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};
/**
@@ -290,6 +294,9 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @sw_marker_wq: workqueue for marker packets
+ * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
+ * @tstamp_config: The Tx tstamp config
+ * @tstamp_task: Tx timestamping task
*/
struct idpf_vport {
u16 num_txq;
@@ -334,6 +341,10 @@ struct idpf_vport {
bool link_up;
wait_queue_head_t sw_marker_wq;
+
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct work_struct tstamp_task;
};
/**
@@ -478,6 +489,13 @@ struct idpf_vport_config {
struct idpf_vc_xn_manager;
+#define idpf_for_each_vport(adapter, iter) \
+ for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
+ *iter = (adapter)->max_vports ? *__##iter : NULL; \
+ iter; \
+ iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
+ *__##iter : NULL)
+
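
The iterator above keeps two cursors: __iter walks the pointer array while
iter holds the dereferenced element, so the loop body sees a struct idpf_vport
pointer directly and iteration stops at the array end or the first NULL slot.
A standalone mock of the same shape (names are illustrative, not driver code):

#include <stdio.h>

struct vport { int id; };

#define for_each_vport(arr, n, iter)					\
	for (struct vport **__##iter = &(arr)[0],			\
	     *iter = (n) ? *__##iter : NULL;				\
	     iter;							\
	     iter = (++__##iter) < &(arr)[(n)] ? *__##iter : NULL)

int main(void)
{
	struct vport a = { 1 }, b = { 2 };
	struct vport *vports[3] = { &a, &b, NULL };	/* NULL ends the walk */

	for_each_vport(vports, 3, vp)
		printf("vport %d\n", vp->id);		/* prints 1, then 2 */
	return 0;
}
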
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -530,6 +548,7 @@ struct idpf_vc_xn_manager;
* @vector_lock: Lock to protect vector distribution
* @queue_lock: Lock to protect queue distribution
* @vc_buf_lock: Lock to protect virtchnl buffer
+ * @ptp: Storage for PTP-related data
*/
struct idpf_adapter {
struct pci_dev *pdev;
@@ -587,6 +606,8 @@ struct idpf_adapter {
struct mutex vector_lock;
struct mutex queue_lock;
struct mutex vc_buf_lock;
+
+ struct idpf_ptp *ptp;
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index e8e046ef2f0d..9642494a67d8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -123,9 +123,12 @@ struct idpf_ctlq_info {
/**
* enum idpf_mbx_opc - PF/VF mailbox commands
* @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP
+ * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to
+ * any peer driver
*/
enum idpf_mbx_opc {
idpf_mbq_opc_send_msg_to_cp = 0x0801,
+ idpf_mbq_opc_send_msg_to_peer_drv = 0x0804,
};
/* API supported for control queue management */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 41e4bd49402a..3fae81f1f988 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -4,6 +4,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
@@ -149,6 +150,18 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter,
}
/**
+ * idpf_ptp_reg_init - Initialize required registers
+ * @adapter: Driver specific private structure
+ *
+ * Set the bits required for enabling shtime and cmd execution
+ */
+static void idpf_ptp_reg_init(const struct idpf_adapter *adapter)
+{
+ adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M;
+ adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M;
+}
+
+/**
* idpf_reg_ops_init - Initialize register API function pointers
* @adapter: Driver specific private structure
*/
@@ -159,6 +172,7 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter)
adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init;
adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init;
adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset;
+ adapter->dev_ops.reg_ops.ptp_reg_init = idpf_ptp_reg_init;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 59b1a1a09996..9bdb309b668e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
/**
* idpf_get_rxnfc - command to get RX flow classification rules
@@ -1312,6 +1313,71 @@ static int idpf_get_link_ksettings(struct net_device *netdev,
return 0;
}
+/**
+ * idpf_get_timestamp_filters - Get the supported timestamping mode
+ * @vport: Virtual port structure
+ * @info: ethtool timestamping info structure
+ *
+ * Get the Tx/Rx timestamp filters.
+ */
+static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
+ struct kernel_ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ if (!vport->tx_tstamp_caps ||
+ vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
+ return;
+
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE;
+
+ info->tx_types |= BIT(HWTSTAMP_TX_ON);
+}
+
+/**
+ * idpf_get_ts_info - Get device PHC association
+ * @netdev: network interface device structure
+ * @info: ethtool timestamping info structure
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int err = 0;
+
+ if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
+ return -EBUSY;
+
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->adapter->ptp) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
+ vport->adapter->ptp->clock) {
+ info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
+ idpf_get_timestamp_filters(vport, info);
+ } else {
+ pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
+ err = ethtool_op_get_ts_info(netdev, info);
+ }
+
+unlock:
+ mutex_unlock(&np->adapter->vport_ctrl_lock);
+
+ return err;
+}
+
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1336,6 +1402,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_ringparam = idpf_get_ringparam,
.set_ringparam = idpf_set_ringparam,
.get_link_ksettings = idpf_get_link_ksettings,
+ .get_ts_info = idpf_get_ts_info,
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
index 24edb8a6ec2e..cc9aa2b6a14a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
@@ -53,6 +53,10 @@
#define PF_FW_ATQH_ATQH_M GENMASK(9, 0)
#define PF_FW_ATQT (PF_FW_BASE + 0x24)
+/* Timesync registers */
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0)
+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(2)
+
/* Interrupts */
#define PF_GLINT_BASE 0x08900000
#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index 8c7f8ef8f1a1..7492d1713243 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -282,7 +282,18 @@ struct idpf_flex_tx_tso_ctx_qw {
u8 flex;
};
-struct idpf_flex_tx_ctx_desc {
+union idpf_flex_tx_ctx_desc {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */
+ struct {
+ __le64 qw0;
+#define IDPF_TX_CTX_L2TAG2_M GENMASK_ULL(47, 32)
+ __le64 qw1;
+#define IDPF_TX_CTX_DTYPE_M GENMASK_ULL(3, 0)
+#define IDPF_TX_CTX_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TX_CTX_TSYN_REG_M GENMASK_ULL(47, 30)
+#define IDPF_TX_CTX_MSS_M	GENMASK_ULL(63, 50)
+ } tsyn;
+
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
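
GENMASK_ULL() takes the high bit first, so the MSS field spanning bits 63:50
reads GENMASK_ULL(63, 50) above. A self-contained sketch of packing the tsyn
qw1 with these masks; the GENMASK/FIELD_PREP equivalents are re-created
locally so the example runs in userspace, and the command and field values are
hypothetical:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP_ULL(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define TX_CTX_DTYPE_M		GENMASK_ULL(3, 0)
#define TX_CTX_CMD_M		GENMASK_ULL(15, 4)
#define TX_CTX_TSYN_REG_M	GENMASK_ULL(47, 30)
#define TX_CTX_MSS_M		GENMASK_ULL(63, 50)	/* high bit first */

int main(void)
{
	uint64_t qw1;

	qw1 = FIELD_PREP_ULL(TX_CTX_DTYPE_M, 0x01) |	/* CTX descriptor */
	      FIELD_PREP_ULL(TX_CTX_CMD_M, 0x1) |	/* hypothetical TSYN cmd */
	      FIELD_PREP_ULL(TX_CTX_TSYN_REG_M, 42) |	/* tstamp latch index */
	      FIELD_PREP_ULL(TX_CTX_MSS_M, 1448);	/* MSS */

	printf("qw1 = 0x%016llx\n", (unsigned long long)qw1);
	return 0;
}
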
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 82f09b4030bc..bab12ecb2df5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
+#include "idpf_ptp.h"
static const struct net_device_ops idpf_netdev_ops;
@@ -144,22 +145,6 @@ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
}
/**
- * idpf_set_mb_vec_id - Set vector index for mailbox
- * @adapter: adapter structure to access the vector chunks
- *
- * The first vector id in the requested vector chunks from the CP is for
- * the mailbox
- */
-static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
-{
- if (adapter->req_vec_chunks)
- adapter->mb_vector.v_idx =
- le16_to_cpu(adapter->caps.mailbox_vector_id);
- else
- adapter->mb_vector.v_idx = 0;
-}
-
-/**
* idpf_mb_intr_init - Initialize the mailbox interrupt
* @adapter: adapter structure to store the mailbox vector
*/
@@ -349,7 +334,7 @@ int idpf_intr_req(struct idpf_adapter *adapter)
goto free_irq;
}
- idpf_set_mb_vec_id(adapter);
+ adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
if (!vecids) {
@@ -723,6 +708,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport = vport;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
vport->netdev = netdev;
return idpf_init_mac_addr(vport, netdev);
@@ -740,6 +726,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->adapter = adapter;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
spin_lock_init(&np->stats_lock);
@@ -2203,8 +2190,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ u16 max_tx_hdr_size = np->max_tx_hdr_size;
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -2227,7 +2214,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
goto unsupported;
len = skb_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
if (!skb->encapsulation)
@@ -2240,7 +2227,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
/* IPLEN can support at most 127 dwords */
len = skb_inner_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
/* No need to validate L4LEN as TCP is the only protocol with a
@@ -2342,6 +2329,60 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
mem->pa = 0;
}
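+/**
+ * idpf_hwtstamp_set - Set the Tx/Rx timestamping configuration
+ * @netdev: network interface device structure
+ * @config: timestamping configuration to apply
+ * @extack: netlink extended ack structure for error reporting
+ *
+ * Return: 0 on success, -EPERM if the link is down, -EOPNOTSUPP if
+ * timestamping is not enabled on the vport, -errno otherwise.
+ */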
+static int idpf_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EOPNOTSUPP;
+ }
+
+ err = idpf_ptp_set_timestamp_mode(vport, config);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
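+/**
+ * idpf_hwtstamp_get - Return the current Tx/Rx timestamping configuration
+ * @netdev: network interface device structure
+ * @config: destination for the current configuration
+ *
+ * Return: 0 on success, -EPERM if the link is down.
+ */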
+static int idpf_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!vport->link_up) {
+ idpf_vport_ctrl_unlock(netdev);
+ return -EPERM;
+ }
+
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) &&
+ !idpf_ptp_is_vport_rx_tstamp_ena(vport)) {
+ idpf_vport_ctrl_unlock(netdev);
+ return 0;
+ }
+
+ *config = vport->tstamp_config;
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
+
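These handlers are reached through the kernel's generic hwtstamp path, i.e.
the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls. A hedged userspace sketch of turning
Tx and Rx timestamping on (the interface name is an assumption):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,		/* reaches the set hook */
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed netdev */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");	/* -EPERM while link is down */
	else
		printf("granted rx_filter: %d\n", cfg.rx_filter);
	return 0;
}
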
static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
@@ -2354,4 +2395,6 @@ static const struct net_device_ops idpf_netdev_ops = {
.ndo_get_stats64 = idpf_get_stats64,
.ndo_set_features = idpf_set_features,
.ndo_tx_timeout = idpf_tx_timeout,
+ .ndo_hwtstamp_get = idpf_hwtstamp_get,
+ .ndo_hwtstamp_set = idpf_hwtstamp_set,
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index b35713036a54..0efd9c0c7a90 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -168,6 +168,10 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free;
}
+ err = pci_enable_ptm(pdev, NULL);
+ if (err)
+ pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n");
+
/* set up for high or low dma */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err) {
@@ -199,9 +203,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_serv_wq_alloc;
}
- adapter->mbx_wq = alloc_workqueue("%s-%s-mbx",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
- dev_driver_string(dev),
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI,
+ 0, dev_driver_string(dev),
dev_name(dev));
if (!adapter->mbx_wq) {
dev_err(dev, "Failed to allocate mailbox workqueue\n");
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
new file mode 100644
index 000000000000..4f8725c85332
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -0,0 +1,873 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+
+/**
+ * idpf_ptp_get_access - Determine the access type of the PTP features
+ * @adapter: Driver specific private structure
+ * @direct: Capability that indicates the direct access
+ * @mailbox: Capability that indicates the mailbox access
+ *
+ * Return: the type of supported access for the PTP feature.
+ */
+static enum idpf_ptp_access
+idpf_ptp_get_access(const struct idpf_adapter *adapter, u32 direct, u32 mailbox)
+{
+ if (adapter->ptp->caps & direct)
+ return IDPF_PTP_DIRECT;
+ else if (adapter->ptp->caps & mailbox)
+ return IDPF_PTP_MAILBOX;
+ else
+ return IDPF_PTP_NONE;
+}
+
+/**
+ * idpf_ptp_get_features_access - Determine the access type of PTP features
+ * @adapter: Driver specific private structure
+ *
+ * Populate the adapter structure with the type of access supported for
+ * each PTP feature.
+ */
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 direct, mailbox;
+
+ /* Get the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB;
+ ptp->get_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Set the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
+	mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB;
+ ptp->set_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Adjust the device clock time */
+ direct = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK;
+ mailbox = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB;
+ ptp->adj_dev_clk_time_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
+ /* Tx timestamping */
+ direct = VIRTCHNL2_CAP_PTP_TX_TSTAMPS;
+ mailbox = VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB;
+ ptp->tx_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+}
+
+/**
+ * idpf_ptp_enable_shtime - Enable shadow time and execute a command
+ * @adapter: Driver specific private structure
+ */
+static void idpf_ptp_enable_shtime(struct idpf_adapter *adapter)
+{
+ u32 shtime_enable, exec_cmd;
+
+ /* Get offsets */
+ shtime_enable = adapter->ptp->cmd.shtime_enable_mask;
+ exec_cmd = adapter->ptp->cmd.exec_cmd_mask;
+
+ /* Set the shtime en and the sync field */
+ writel(shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+ writel(exec_cmd | shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync);
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_direct - Read the main timer value directly
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ *
+ * Return: the device clock time.
+ */
+static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+ u32 hi, lo;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ return ((u64)hi << 32) | lo;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg_mailbox - Read the main timer value through mailbox
+ * @adapter: Driver specific private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored when NULL is given.
+ * @src_clk: Returned main timer value in nanoseconds
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg_mailbox(struct idpf_adapter *adapter,
+ struct ptp_system_timestamp *sts,
+ u64 *src_clk)
+{
+ struct idpf_ptp_dev_timers clk_time;
+ int err;
+
+ /* Read the system timestamp pre PHC read */
+ ptp_read_system_prets(sts);
+
+ err = idpf_ptp_get_dev_clk_time(adapter, &clk_time);
+ if (err)
+ return err;
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ *src_clk = clk_time.dev_clk_time_ns;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_read_src_clk_reg - Read the main timer value
+ * @adapter: Driver specific private structure
+ * @src_clk: Returned main timer value in nanoseconds
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success (the main timer value is stored in @src_clk),
+ * -errno otherwise.
+ */
+static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk,
+ struct ptp_system_timestamp *sts)
+{
+ switch (adapter->ptp->get_dev_clk_time_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_MAILBOX:
+ return idpf_ptp_read_src_clk_reg_mailbox(adapter, sts, src_clk);
+ case IDPF_PTP_DIRECT:
+ *src_clk = idpf_ptp_read_src_clk_reg_direct(adapter, sts);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_gettimex64 - Get the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Return: 0 on success, with the device clock time converted and stored
+ * in @ts, -errno otherwise.
+ */
+static int idpf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ u64 time_ns;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &time_ns, sts);
+ if (err)
+ return -EACCES;
+
+ *ts = ns_to_timespec64(time_ns);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx
+ * queue group.
+ * @grp: receive queue group in which Rx timestamp is enabled
+ * @split: Indicates whether the queue model is split or single queue
+ * @systime: Cached system time
+ */
+static void
+idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split,
+ u64 systime)
+{
+ struct idpf_rx_queue *rxq;
+ u16 i;
+
+ if (!split) {
+ for (i = 0; i < grp->singleq.num_rxq; i++) {
+ rxq = grp->singleq.rxqs[i];
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ } else {
+ for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
+ rxq = &grp->splitq.rxq_sets[i]->rxq;
+ if (rxq)
+ WRITE_ONCE(rxq->cached_phc_time, systime);
+ }
+ }
+}
+
+/**
+ * idpf_ptp_update_cached_phctime - Update the cached PHC time values
+ * @adapter: Driver specific private structure
+ *
+ * This function updates the system time values which are cached in the adapter
+ * structure and the Rx queues.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
+{
+ u64 systime;
+ int err;
+
+ err = idpf_ptp_read_src_clk_reg(adapter, &systime, NULL);
+ if (err)
+ return -EACCES;
+
+ /* Update the cached PHC time stored in the adapter structure.
+ * These values are used to extend Tx timestamp values to 64 bit
+ * expected by the stack.
+ */
+ WRITE_ONCE(adapter->ptp->cached_phc_time, systime);
+ WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
+
+ idpf_for_each_vport(adapter, vport) {
+ bool split;
+
+ if (!vport || !vport->rxq_grps)
+ continue;
+
+ split = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
+ }
+ }
+
+ return 0;
+}
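+
+/* Why 2 seconds: hardware timestamps carry only the low 32 bits of the
+ * nanosecond counter, which wrap every 2^32 ns (~4.295 s). Extending such a
+ * value against the cached PHC time is unambiguous only while the two differ
+ * by less than ~2.147 s, so the cache must be refreshed well inside that
+ * window.
+ */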
+
+/**
+ * idpf_ptp_settime64 - Set the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The timespec value is
+ * converted to nanoseconds before being written to the device.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+ u64 ns;
+
+ access = adapter->ptp->set_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ ns = timespec64_to_ns(ts);
+
+ err = idpf_ptp_set_dev_clk_time(adapter, ns);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to set the time, err: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+{
+ struct timespec64 now, then;
+ int err;
+
+ err = idpf_ptp_gettimex64(info, &now, NULL);
+ if (err)
+ return err;
+
+ then = ns_to_timespec64(delta);
+ now = timespec64_add(now, then);
+
+ return idpf_ptp_settime64(info, &now);
+}
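+
+/* Note the get->adjust->set sequence above is not atomic: the clock keeps
+ * running between the read and the write, so a small additional error can
+ * accumulate. This path is taken only when the requested delta does not fit
+ * in the signed 32-bit atomic adjustment the hardware supports.
+ */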
+
+/**
+ * idpf_ptp_adjtime - Adjust the time of the clock by the indicated delta
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ /* Hardware only supports atomic adjustments using signed 32-bit
+ * integers. For any adjustment outside this range, perform
+ * a non-atomic get->adjust->set flow.
+ */
+ if (delta > S32_MAX || delta < S32_MIN)
+ return idpf_ptp_adjtime_nonatomic(info, delta);
+
+ err = idpf_ptp_adj_dev_clk_time(adapter, delta);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to adjust the clock with delta %lld err: %pe\n",
+ delta, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_ptp_update_cached_phctime(adapter);
+ if (err)
+ pci_warn(adapter->pdev,
+ "Unable to immediately update cached PHC time\n");
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+ enum idpf_ptp_access access;
+ u64 incval, diff;
+ int err;
+
+ access = adapter->ptp->adj_dev_clk_time_access;
+ if (access != IDPF_PTP_MAILBOX)
+ return -EOPNOTSUPP;
+
+ incval = adapter->ptp->base_incval;
+
+ diff = adjust_by_scaled_ppm(incval, scaled_ppm);
+ err = idpf_ptp_adj_dev_clk_fine(adapter, diff);
+ if (err)
+ pci_err(adapter->pdev, "Failed to adjust clock increment rate for scaled ppm %ld %pe\n",
+ scaled_ppm, ERR_PTR(err));
+
+ return 0;
+}
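+
+/* adjust_by_scaled_ppm() scales the base increment value by
+ * scaled_ppm / (10^6 * 2^16), where the low 16 bits of scaled_ppm are the
+ * fractional ppm. For example, scaled_ppm = 65536 requests a +1 ppm
+ * correction, i.e. a new increment value of base_incval * 1.000001.
+ */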
+
+/**
+ * idpf_ptp_verify_pin - Verify if pin supports requested pin function
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Return: -EOPNOTSUPP, as pin functions are not supported yet.
+ */
+static int idpf_ptp_verify_pin(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_gpio_enable - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * Return: -EOPNOTSUPP, as ancillary PHC features are not supported yet.
+ */
+static int idpf_ptp_gpio_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_tstamp_extend_32b_to_64b - Convert a 32b nanoseconds Tx or Rx
+ * timestamp value to 64b.
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_timestamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64-bit timestamps that the stack expects.
+ *
+ * Return: the timestamp value extended to 64 bits based on the cached PHC
+ * time.
+ */
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp)
+{
+ u32 delta, phc_time_lo;
+ u64 ns;
+
+ /* Extract the lower 32 bits of the PHC time */
+ phc_time_lo = (u32)cached_phc_time;
+
+ /* Calculate the delta between the lower 32bits of the cached PHC
+ * time and the in_timestamp value.
+ */
+ delta = in_timestamp - phc_time_lo;
+
+ if (delta > U32_MAX / 2) {
+ /* Reverse the delta calculation here */
+ delta = phc_time_lo - in_timestamp;
+ ns = cached_phc_time - delta;
+ } else {
+ ns = cached_phc_time + delta;
+ }
+
+ return ns;
+}
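+
+/* Worked example: with cached_phc_time = 0x1_0000_0100 (phc_time_lo = 0x100),
+ * an in_timestamp of 0x200 gives delta = 0x100, so ns = 0x1_0000_0200. An
+ * in_timestamp of 0x50 gives delta = 0x50 - 0x100, which wraps to
+ * 0xFFFF_FF50 > U32_MAX / 2; the delta is therefore reversed to 0xB0 and
+ * ns = 0x1_0000_0100 - 0xB0 = 0x1_0000_0050, a timestamp slightly older than
+ * the cached PHC time.
+ */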
+
+/**
+ * idpf_ptp_extend_ts - Convert a 40b timestamp to 64b nanoseconds
+ * @vport: Virtual port structure
+ * @in_tstamp: Ingress/egress timestamp value
+ *
+ * It is assumed that the caller verifies the timestamp is valid prior to
+ * calling this function.
+ *
+ * Extract the 32-bit nominal nanoseconds and extend them. Use the cached PHC
+ * time stored in the device private PTP structure as the basis for timestamp
+ * extension.
+ *
+ * Return: the Tx timestamp value extended to 64 bits, or 0 if the cached PHC
+ * time is stale (older than 2 seconds).
+ */
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ unsigned long discard_time;
+
+ discard_time = ptp->cached_phc_jiffies + 2 * HZ;
+
+ if (time_is_before_jiffies(discard_time))
+ return 0;
+
+ return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time,
+ lower_32_bits(in_tstamp));
+}
+
+/**
+ * idpf_ptp_request_ts - Request an available Tx timestamp index
+ * @tx_q: Transmit queue on which the Tx timestamp is requested
+ * @skb: The SKB to associate with this timestamp request
+ * @idx: Index of the Tx timestamp latch
+ *
+ * Request a Tx timestamp index, negotiated during PTP init, that will be set
+ * in the Tx descriptor.
+ *
+ * Return: 0 on success, with the index to set in the Tx descriptor stored in
+ * @idx, -errno otherwise.
+ */
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ struct list_head *head;
+
+ /* Get the index from the free latches list */
+ spin_lock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ head = &tx_q->cached_tstamp_caps->latches_free;
+ if (list_empty(head)) {
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+ return -ENOBUFS;
+ }
+
+ ptp_tx_tstamp = list_first_entry(head, struct idpf_ptp_tx_tstamp,
+ list_member);
+ list_del(&ptp_tx_tstamp->list_member);
+
+ ptp_tx_tstamp->skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Move the element to the used latches list */
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_q->cached_tstamp_caps->latches_in_use);
+ spin_unlock(&tx_q->cached_tstamp_caps->latches_lock);
+
+ *idx = ptp_tx_tstamp->idx;
+
+ return 0;
+}
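+
+/* A typical caller flow (see idpf_tx_tstamp() in idpf_txrx.c, added later in
+ * this patch): request a free index here, mark the packet with
+ * IDPF_TX_FLAGS_TSYN and write the returned index into a TSYN context
+ * descriptor so that hardware latches the transmit time into the matching
+ * latch registers.
+ */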
+
+/**
+ * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
+ * @vport: Virtual port structure
+ * @rx_filter: Receive timestamp filter
+ */
+static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
+{
+ bool enable = true, splitq;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE) {
+ enable = false;
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ } else {
+ vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+
+ for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ struct idpf_rx_queue *rx_queue;
+ u16 j, num_rxq;
+
+ if (splitq)
+ num_rxq = grp->splitq.num_rxq_sets;
+ else
+ num_rxq = grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (splitq)
+ rx_queue = &grp->splitq.rxq_sets[j]->rxq;
+ else
+ rx_queue = grp->singleq.rxqs[j];
+
+ if (enable)
+ idpf_queue_set(PTP, rx_queue);
+ else
+ idpf_queue_clear(PTP, rx_queue);
+ }
+ }
+}
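+
+/* The per-queue PTP flag set above gates the Rx hot path: the
+ * idpf_rx_process_skb_fields() change in this patch calls idpf_rx_hwtstamp()
+ * only when idpf_queue_has(PTP, rxq), so queues with timestamping disabled
+ * pay no extra cost.
+ */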
+
+/**
+ * idpf_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
+ * @vport: Virtual port structure
+ * @config: Hwtstamp settings requested or saved
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vport->tstamp_config.tx_type = config->tx_type;
+ idpf_ptp_set_rx_tstamp(vport, config->rx_filter);
+ *config = vport->tstamp_config;
+
+ return 0;
+}
+
+/**
+ * idpf_tstamp_task - Delayed task to handle Tx tstamps
+ * @work: work_struct handle
+ */
+void idpf_tstamp_task(struct work_struct *work)
+{
+ struct idpf_vport *vport;
+
+ vport = container_of(work, struct idpf_vport, tstamp_task);
+
+ idpf_ptp_get_tx_tstamp(vport);
+}
+
+/**
+ * idpf_ptp_do_aux_work - Do PTP periodic work
+ * @info: Driver's PTP info structure
+ *
+ * Return: the number of jiffies until the next run of the periodic work.
+ */
+static long idpf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ idpf_ptp_update_cached_phctime(adapter);
+
+ return msecs_to_jiffies(500);
+}
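+
+/* The 500 ms cadence keeps the cached PHC time comfortably inside the 2 s
+ * discard window enforced by idpf_ptp_extend_ts(), with margin for a delayed
+ * work run.
+ */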
+
+/**
+ * idpf_ptp_set_caps - Set PTP capabilities
+ * @adapter: Driver specific private structure
+ *
+ * This function fills the PTP clock capabilities and callbacks.
+ */
+static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp->info;
+
+ snprintf(info->name, sizeof(info->name), "%s-%s-clk",
+ KBUILD_MODNAME, pci_name(adapter->pdev));
+
+ info->owner = THIS_MODULE;
+ info->max_adj = adapter->ptp->max_adj;
+ info->gettimex64 = idpf_ptp_gettimex64;
+ info->settime64 = idpf_ptp_settime64;
+ info->adjfine = idpf_ptp_adjfine;
+ info->adjtime = idpf_ptp_adjtime;
+ info->verify = idpf_ptp_verify_pin;
+ info->enable = idpf_ptp_gpio_enable;
+ info->do_aux_work = idpf_ptp_do_aux_work;
+}
+
+/**
+ * idpf_ptp_create_clock - Create PTP clock device for userspace
+ * @adapter: Driver specific private structure
+ *
+ * This function creates a new PTP clock device.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_create_clock(const struct idpf_adapter *adapter)
+{
+ struct ptp_clock *clock;
+
+ idpf_ptp_set_caps(adapter);
+
+ /* Attempt to register the clock before enabling the hardware. */
+ clock = ptp_clock_register(&adapter->ptp->info,
+ &adapter->pdev->dev);
+ if (IS_ERR(clock)) {
+ pci_err(adapter->pdev, "PTP clock creation failed: %pe\n",
+ clock);
+ return PTR_ERR(clock);
+ }
+
+ adapter->ptp->clock = clock;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_release_vport_tstamp - Release the Tx timestamp trackers for a
+ * given vport.
+ * @vport: Virtual port structure
+ *
+ * Remove the queues and delete the lists that track Tx timestamp entries for
+ * a given vport.
+ */
+static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
+{
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct list_head *head;
+
+ cancel_work_sync(&vport->tstamp_task);
+
+ /* Remove list with free latches */
+ spin_lock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ head = &vport->tx_tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ /* Remove list with latches in use */
+ head = &vport->tx_tstamp_caps->latches_in_use;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock);
+
+ kfree(vport->tx_tstamp_caps);
+ vport->tx_tstamp_caps = NULL;
+}
+
+/**
+ * idpf_ptp_release_tstamp - Release the Tx timestamp trackers
+ * @adapter: Driver specific private structure
+ *
+ * Remove the queues and delete the lists that track Tx timestamp entries.
+ */
+static void idpf_ptp_release_tstamp(struct idpf_adapter *adapter)
+{
+ idpf_for_each_vport(adapter, vport) {
+ if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+ continue;
+
+ idpf_ptp_release_vport_tstamp(vport);
+ }
+}
+
+/**
+ * idpf_ptp_get_txq_tstamp_capability - Verify the timestamping capability
+ * for a given Tx queue.
+ * @txq: Transmit queue
+ *
+ * Timestamping requires both reading the device clock value and support in
+ * the Control Plane; the function checks both factors and reports whether
+ * timestamping is supported for the queue.
+ *
+ * Return: true if the timestamping is supported, false otherwise.
+ */
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ if (!txq || !txq->cached_tstamp_caps)
+ return false;
+ else if (txq->cached_tstamp_caps->access)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * idpf_ptp_init - Initialize PTP hardware clock support
+ * @adapter: Driver specific private structure
+ *
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions. The function allocates and registers a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ struct timespec64 ts;
+ int err;
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP)) {
+ pci_dbg(adapter->pdev, "PTP capability is not detected\n");
+ return -EOPNOTSUPP;
+ }
+
+ adapter->ptp = kzalloc(sizeof(*adapter->ptp), GFP_KERNEL);
+ if (!adapter->ptp)
+ return -ENOMEM;
+
+ /* add a back pointer to adapter */
+ adapter->ptp->adapter = adapter;
+
+ if (adapter->dev_ops.reg_ops.ptp_reg_init)
+ adapter->dev_ops.reg_ops.ptp_reg_init(adapter);
+
+ err = idpf_ptp_get_caps(adapter);
+ if (err) {
+ pci_err(adapter->pdev, "Failed to get PTP caps err %d\n", err);
+ goto free_ptp;
+ }
+
+ err = idpf_ptp_create_clock(adapter);
+ if (err)
+ goto free_ptp;
+
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_schedule_worker(adapter->ptp->clock, 0);
+
+ /* Write the default increment time value if the clock adjustments
+ * are enabled.
+ */
+ if (adapter->ptp->adj_dev_clk_time_access != IDPF_PTP_NONE) {
+ err = idpf_ptp_adj_dev_clk_fine(adapter,
+ adapter->ptp->base_incval);
+ if (err)
+ goto remove_clock;
+ }
+
+ /* Write the initial time value if the set time operation is enabled */
+ if (adapter->ptp->set_dev_clk_time_access != IDPF_PTP_NONE) {
+ ts = ktime_to_timespec64(ktime_get_real());
+ err = idpf_ptp_settime64(&adapter->ptp->info, &ts);
+ if (err)
+ goto remove_clock;
+ }
+
+ spin_lock_init(&adapter->ptp->read_dev_clk_lock);
+
+ pci_dbg(adapter->pdev, "PTP init successful\n");
+
+ return 0;
+
+remove_clock:
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(adapter->ptp->clock);
+ adapter->ptp->clock = NULL;
+
+free_ptp:
+ kfree(adapter->ptp);
+ adapter->ptp = NULL;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_release - Clear PTP hardware clock support
+ * @adapter: Driver specific private structure
+ */
+void idpf_ptp_release(struct idpf_adapter *adapter)
+{
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->tx_tstamp_access != IDPF_PTP_NONE &&
+ ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ idpf_ptp_release_tstamp(adapter);
+
+ if (ptp->clock) {
+ if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+ ptp_cancel_worker_sync(adapter->ptp->clock);
+
+ ptp_clock_unregister(ptp->clock);
+ }
+
+ kfree(ptp);
+ adapter->ptp = NULL;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
new file mode 100644
index 000000000000..a876749d6116
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
@@ -0,0 +1,363 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_PTP_H
+#define _IDPF_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct idpf_ptp_cmd - PTP command masks
+ * @exec_cmd_mask: mask to trigger command execution
+ * @shtime_enable_mask: mask to enable shadow time
+ */
+struct idpf_ptp_cmd {
+ u32 exec_cmd_mask;
+ u32 shtime_enable_mask;
+};
+
+/**
+ * struct idpf_ptp_dev_clk_regs - PTP device registers
+ * @dev_clk_ns_l: low part of the device clock register
+ * @dev_clk_ns_h: high part of the device clock register
+ * @phy_clk_ns_l: low part of the PHY clock register
+ * @phy_clk_ns_h: high part of the PHY clock register
+ * @incval_l: low part of the increment value register
+ * @incval_h: high part of the increment value register
+ * @shadj_l: low part of the shadow adjust register
+ * @shadj_h: high part of the shadow adjust register
+ * @phy_incval_l: low part of the PHY increment value register
+ * @phy_incval_h: high part of the PHY increment value register
+ * @phy_shadj_l: low part of the PHY shadow adjust register
+ * @phy_shadj_h: high part of the PHY shadow adjust register
+ * @cmd: PTP command register
+ * @phy_cmd: PHY command register
+ * @cmd_sync: PTP command synchronization register
+ */
+struct idpf_ptp_dev_clk_regs {
+ /* Main clock */
+ void __iomem *dev_clk_ns_l;
+ void __iomem *dev_clk_ns_h;
+
+ /* PHY timer */
+ void __iomem *phy_clk_ns_l;
+ void __iomem *phy_clk_ns_h;
+
+ /* Main timer adjustments */
+ void __iomem *incval_l;
+ void __iomem *incval_h;
+ void __iomem *shadj_l;
+ void __iomem *shadj_h;
+
+ /* PHY timer adjustments */
+ void __iomem *phy_incval_l;
+ void __iomem *phy_incval_h;
+ void __iomem *phy_shadj_l;
+ void __iomem *phy_shadj_h;
+
+ /* Command */
+ void __iomem *cmd;
+ void __iomem *phy_cmd;
+ void __iomem *cmd_sync;
+};
+
+/**
+ * enum idpf_ptp_access - the type of access to PTP operations
+ * @IDPF_PTP_NONE: no access
+ * @IDPF_PTP_DIRECT: direct access through BAR registers
+ * @IDPF_PTP_MAILBOX: access through mailbox messages
+ */
+enum idpf_ptp_access {
+ IDPF_PTP_NONE = 0,
+ IDPF_PTP_DIRECT,
+ IDPF_PTP_MAILBOX,
+};
+
+/**
+ * struct idpf_ptp_secondary_mbx - PTP secondary mailbox
+ * @peer_mbx_q_id: PTP mailbox queue ID
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @valid: indicates whether secondary mailbox is supported by the Control
+ * Plane
+ */
+struct idpf_ptp_secondary_mbx {
+ u16 peer_mbx_q_id;
+ u16 peer_id;
+ bool valid:1;
+};
+
+/**
+ * enum idpf_ptp_tx_tstamp_state - Tx timestamp states
+ * @IDPF_PTP_FREE: Tx timestamp index free to use
+ * @IDPF_PTP_REQUEST: Tx timestamp index set to the Tx descriptor
+ * @IDPF_PTP_READ_VALUE: Tx timestamp value ready to be read
+ */
+enum idpf_ptp_tx_tstamp_state {
+ IDPF_PTP_FREE,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE,
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp_status - Parameters to track Tx timestamp
+ * @skb: the pointer to the SKB that received the completion tag
+ * @state: the state of the Tx timestamp
+ */
+struct idpf_ptp_tx_tstamp_status {
+ struct sk_buff *skb;
+ enum idpf_ptp_tx_tstamp_state state;
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp - Parameters for Tx timestamping
+ * @list_member: the list member structure
+ * @tx_latch_reg_offset_l: Tx tstamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx tstamp latch high register offset
+ * @skb: the pointer to the SKB for this timestamp request
+ * @tstamp: the Tx tstamp value
+ * @idx: the index of the Tx tstamp
+ */
+struct idpf_ptp_tx_tstamp {
+ struct list_head list_member;
+ u32 tx_latch_reg_offset_l;
+ u32 tx_latch_reg_offset_h;
+ struct sk_buff *skb;
+ u64 tstamp;
+ u32 idx;
+};
+
+/**
+ * struct idpf_ptp_vport_tx_tstamp_caps - Tx timestamp capabilities
+ * @vport_id: the vport id
+ * @num_entries: the number of negotiated Tx timestamp entries
+ * @tstamp_ns_lo_bit: first bit for nanosecond part of the timestamp
+ * @latches_lock: the lock to the lists of free/used timestamp indexes
+ * @status_lock: the lock to the status tracker
+ * @access: indicates an access to Tx timestamp
+ * @latches_free: the list of the free Tx timestamps latches
+ * @latches_in_use: the list of the used Tx timestamps latches
+ * @tx_tstamp_status: Tx tstamp status tracker
+ */
+struct idpf_ptp_vport_tx_tstamp_caps {
+ u32 vport_id;
+ u16 num_entries;
+ u16 tstamp_ns_lo_bit;
+ spinlock_t latches_lock;
+ spinlock_t status_lock;
+ bool access:1;
+ struct list_head latches_free;
+ struct list_head latches_in_use;
+ struct idpf_ptp_tx_tstamp_status tx_tstamp_status[];
+};
+
+/**
+ * struct idpf_ptp - PTP parameters
+ * @info: structure defining PTP hardware capabilities
+ * @clock: pointer to registered PTP clock device
+ * @adapter: back pointer to the adapter
+ * @base_incval: base increment value of the PTP clock
+ * @max_adj: maximum adjustment of the PTP clock
+ * @cmd: HW specific command masks
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
+ * @dev_clk_regs: the set of registers to access the device clock
+ * @caps: PTP capabilities negotiated with the Control Plane
+ * @get_dev_clk_time_access: access type for getting the device clock time
+ * @set_dev_clk_time_access: access type for setting the device clock time
+ * @adj_dev_clk_time_access: access type for adjusting the device clock
+ * @tx_tstamp_access: access type for the Tx timestamp value read
+ * @rsv: reserved bits
+ * @secondary_mbx: parameters for using dedicated PTP mailbox
+ * @read_dev_clk_lock: spinlock protecting access to the device clock read
+ * operation executed by the HW latch
+ */
+struct idpf_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct idpf_adapter *adapter;
+ u64 base_incval;
+ u64 max_adj;
+ struct idpf_ptp_cmd cmd;
+ u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
+ struct idpf_ptp_dev_clk_regs dev_clk_regs;
+ u32 caps;
+ enum idpf_ptp_access get_dev_clk_time_access:2;
+ enum idpf_ptp_access set_dev_clk_time_access:2;
+ enum idpf_ptp_access adj_dev_clk_time_access:2;
+ enum idpf_ptp_access tx_tstamp_access:2;
+ u8 rsv;
+ struct idpf_ptp_secondary_mbx secondary_mbx;
+ spinlock_t read_dev_clk_lock;
+};
+
+/**
+ * idpf_ptp_info_to_adapter - get driver adapter struct from ptp_clock_info
+ * @info: pointer to ptp_clock_info struct
+ *
+ * Return: pointer to the corresponding adapter struct
+ */
+static inline struct idpf_adapter *
+idpf_ptp_info_to_adapter(const struct ptp_clock_info *info)
+{
+ const struct idpf_ptp *ptp = container_of_const(info, struct idpf_ptp,
+ info);
+ return ptp->adapter;
+}
+
+/**
+ * struct idpf_ptp_dev_timers - System time and device time values
+ * @sys_time_ns: system time value expressed in nanoseconds
+ * @dev_clk_time_ns: device clock time value expressed in nanoseconds
+ */
+struct idpf_ptp_dev_timers {
+ u64 sys_time_ns;
+ u64 dev_clk_time_ns;
+};
+
+/**
+ * idpf_ptp_is_vport_tx_tstamp_ena - Verify the Tx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * Tx timestamp capabilities are negotiated with the Control Plane only if the
+ * device clock value can be read, the Tx timestamp access type is other than
+ * NONE, and the PTP clock for the adapter is created. When all those
+ * conditions are satisfied, the Tx timestamp feature is enabled and
+ * tx_tstamp_caps is allocated and filled.
+ *
+ * Return: true if the Tx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_tx_tstamp_ena(struct idpf_vport *vport)
+{
+ if (!vport->tx_tstamp_caps)
+ return false;
+ else
+ return true;
+}
+
+/**
+ * idpf_ptp_is_vport_rx_tstamp_ena - Verify the Rx timestamping enablement for
+ * a given vport.
+ * @vport: Virtual port structure
+ *
+ * Rx timestamp feature is enabled if the PTP clock for the adapter is created
+ * and it is possible to read the value of the device clock. The second
+ * assumption comes from the need to extend the Rx timestamp value to 64 bit
+ * based on the current device clock time.
+ *
+ * Return: true if the Rx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_rx_tstamp_ena(struct idpf_vport *vport)
+{
+ if (!vport->adapter->ptp ||
+ vport->adapter->ptp->get_dev_clk_time_access == IDPF_PTP_NONE)
+ return false;
+ else
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int idpf_ptp_init(struct idpf_adapter *adapter);
+void idpf_ptp_release(struct idpf_adapter *adapter);
+int idpf_ptp_get_caps(struct idpf_adapter *adapter);
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time);
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport);
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport);
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config);
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp);
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp);
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ u32 *idx);
+void idpf_tstamp_task(struct work_struct *work);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+ return 0;
+}
+
+static inline void idpf_ptp_release(struct idpf_adapter *adapter) { }
+
+static inline int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+idpf_ptp_get_features_access(const struct idpf_adapter *adapter) { }
+
+static inline bool
+idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+ return false;
+}
+
+static inline int
+idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
+ u64 time)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter,
+ u64 incval)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter,
+ s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+ struct kernel_hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+ return 0;
+}
+
+static inline u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time,
+ u32 in_timestamp)
+{
+ return 0;
+}
+
+static inline int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q,
+ struct sk_buff *skb, u32 *idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void idpf_tstamp_task(struct work_struct *work) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
+#endif /* _IDPF_PTP_H */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index eae1b6f474e6..2e356dd10812 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -891,7 +891,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
- * @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -901,21 +900,20 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
*/
static void
idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
- struct libeth_rqe_info *fields, u32 *ptype)
+ struct libeth_rqe_info *fields)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
- *ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
- * @ptype: pointer that will store packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -925,12 +923,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
*/
static void
idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
- struct libeth_rqe_info *fields, u32 *ptype)
+ struct libeth_rqe_info *fields)
{
fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
- *ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
- le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+ fields->ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -938,18 +936,17 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
* @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
- * @ptype: pointer that will store packet type
*
*/
static void
idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
- struct libeth_rqe_info *fields, u32 *ptype)
+ struct libeth_rqe_info *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields);
else
- idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
/**
@@ -972,7 +969,6 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
- u32 ptype;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rx_q->rx[ntc];
@@ -993,7 +989,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
*/
dma_rmb();
- idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype);
+ idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
rx_buf = &rx_q->rx_buf[ntc];
if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
@@ -1037,7 +1033,8 @@ skip_data:
total_rx_bytes += skb->len;
/* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc,
+ fields.ptype);
/* send completed skb up the stack */
napi_gro_receive(rx_q->pp->p.napi, skb);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index bdf52cef3891..631679cdaa6f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -5,6 +5,7 @@
#include <net/libeth/tx.h>
#include "idpf.h"
+#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
struct idpf_tx_stash {
@@ -1107,6 +1108,8 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
*/
static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
+ struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
+ struct work_struct *tstamp_task = &vport->tstamp_task;
int i, j, k = 0;
vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
@@ -1121,6 +1124,12 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
for (j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
+
+ if (!caps)
+ continue;
+
+ vport->txqs[k]->cached_tstamp_caps = caps;
+ vport->txqs[k]->tstamp_task = tstamp_task;
}
}
@@ -1655,6 +1664,40 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
}
/**
+ * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
+ * @txq: queue to read the timestamp from
+ * @skb: socket buffer to provide Tx timestamp value
+ *
+ * Schedule a work to read Tx timestamp value generated once the packet is
+ * transmitted.
+ */
+static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
+{
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_ptp_tx_tstamp_status *tx_tstamp_status;
+
+ tx_tstamp_caps = txq->cached_tstamp_caps;
+ spin_lock_bh(&tx_tstamp_caps->status_lock);
+
+ for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) {
+ tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i];
+ if (tx_tstamp_status->state != IDPF_PTP_FREE)
+ continue;
+
+ tx_tstamp_status->skb = skb;
+ tx_tstamp_status->state = IDPF_PTP_REQUEST;
+
+ /* Fetch timestamp from completion descriptor through
+ * virtchnl msg to report to stack.
+ */
+ queue_work(system_unbound_wq, txq->tstamp_task);
+ break;
+ }
+
+ spin_unlock_bh(&tx_tstamp_caps->status_lock);
+}
+
+/**
* idpf_tx_clean_stashed_bufs - clean bufs that were stored for
* out of order completions
* @txq: queue to clean
@@ -1682,6 +1725,11 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
continue;
hash_del(&stash->hlist);
+
+ if (stash->buf.type == LIBETH_SQE_SKB &&
+ (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS))
+ idpf_tx_read_tstamp(txq, stash->buf.skb);
+
libeth_tx_complete(&stash->buf, &cp);
/* Push shadow buf back onto stack */
@@ -1876,8 +1924,12 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
return false;
- if (tx_buf->type == LIBETH_SQE_SKB)
+ if (tx_buf->type == LIBETH_SQE_SKB) {
+ if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ idpf_tx_read_tstamp(txq, tx_buf->skb);
+
libeth_tx_complete(tx_buf, &cp);
+ }
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
@@ -2127,7 +2179,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size)
{
- desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
+ *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd);
desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
@@ -2296,7 +2348,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
* descriptor. Reset that here.
*/
tx_desc = &txq->flex_tx[idx];
- memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
+ memset(tx_desc, 0, sizeof(*tx_desc));
if (idx == 0)
idx = txq->desc_count;
idx--;
@@ -2699,10 +2751,10 @@ static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
* Since the TX buffer rings mimics the descriptor ring, update the tx buffer
* ring entry to reflect that this index is a context descriptor
*/
-static struct idpf_flex_tx_ctx_desc *
+static union idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
- struct idpf_flex_tx_ctx_desc *desc;
+ union idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
txq->tx_buf[i].type = LIBETH_SQE_CTX;
@@ -2732,6 +2784,73 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK))
+/**
+ * idpf_tx_tstamp - set up context descriptor for hardware timestamp
+ * @tx_q: queue to send buffer on
+ * @skb: pointer to the SKB we're sending
+ * @off: pointer to the offload struct
+ *
+ * Return: index of the Tx timestamp latch on success, -1 otherwise.
+ */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ int err, idx;
+
+ /* only timestamp the outbound packet if the user has requested it */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return -1;
+
+ if (!idpf_ptp_get_txq_tstamp_capability(tx_q))
+ return -1;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (off->tx_flags & IDPF_TX_FLAGS_TSO)
+ return -1;
+
+ /* Grab an open timestamp slot */
+ err = idpf_ptp_request_ts(tx_q, skb, &idx);
+ if (err) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tstamp_skipped);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ return -1;
+ }
+
+ off->tx_flags |= IDPF_TX_FLAGS_TSYN;
+
+ return idx;
+}
+
+/**
+ * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate
+ * PHY Tx timestamp
+ * @ctx_desc: Context descriptor
+ * @idx: Index of the Tx timestamp latch
+ */
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{
+ ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX,
+ IDPF_TX_CTX_DTYPE_M) |
+ le64_encode_bits(IDPF_TX_CTX_DESC_TSYN,
+ IDPF_TX_CTX_CMD_M) |
+ le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M);
+}
+#else /* CONFIG_PTP_1588_CLOCK */
+static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ return -1;
+}
+
+static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
+ u32 idx)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
* @skb: send buffer
@@ -2743,9 +2862,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
{
struct idpf_tx_splitq_params tx_params = { };
+ union idpf_flex_tx_ctx_desc *ctx_desc;
struct idpf_tx_buf *first;
unsigned int count;
- int tso;
+ int tso, idx;
count = idpf_tx_desc_count_required(tx_q, skb);
if (unlikely(!count))
@@ -2765,8 +2885,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
if (tso) {
/* If tso is needed, set up context desc */
- struct idpf_flex_tx_ctx_desc *ctx_desc =
- idpf_tx_splitq_get_ctx_desc(tx_q);
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
ctx_desc->tso.qw1.cmd_dtype =
cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
@@ -2784,6 +2903,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
u64_stats_update_end(&tx_q->stats_sync);
}
+ idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload);
+ if (idx != -1) {
+ ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q);
+ idpf_tx_set_tstamp_desc(ctx_desc, idx);
+ }
+
/* record the location of the first descriptor for this packet */
first = &tx_q->tx_buf[tx_q->next_to_use];
first->skb = skb;
@@ -3046,6 +3171,33 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
}
/**
+ * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack
+ * @rxq: pointer to the rx queue that receives the timestamp
+ * @rx_desc: pointer to rx descriptor containing timestamp
+ * @skb: skb to put timestamp in
+ */
+static void
+idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 cached_time, ts_ns;
+ u32 ts_high;
+
+ if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+ return;
+
+ cached_time = READ_ONCE(rxq->cached_phc_time);
+
+ ts_high = le32_to_cpu(rx_desc->ts_high);
+ ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ts_ns),
+ };
+}
+
+/**
* idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
* @rxq: Rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being populated
@@ -3070,6 +3222,9 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, decoded);
+ if (idpf_queue_has(PTP, rxq))
+ idpf_rx_hwtstamp(rxq, rx_desc, skb);
+
skb->protocol = eth_type_trans(skb, rxq->netdev);
skb_record_rx_queue(skb, rxq->idx);
@@ -4025,6 +4180,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
+ /* Switch to poll mode in the tear-down path after sending disable
+ * queues virtchnl message, as the interrupts will be disabled after
+ * that.
+ */
+ if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
+ q_vector->tx[0])))
+ return budget;
+
work_done = min_t(int, work_done, budget - 1);
/* Exit the polling mode, but don't re-enable interrupts if stack might
@@ -4035,15 +4198,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
else
idpf_vport_intr_set_wb_on_itr(q_vector);
- /* Switch to poll mode in the tear-down path after sending disable
- * queues virtchnl message, as the interrupts will be disabled after
- * that
- */
- if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
- q_vector->tx[0])))
- return budget;
- else
- return work_done;
+ return work_done;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index b029f566e57c..c779fe71df99 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -142,6 +142,7 @@ do { \
#define IDPF_TX_FLAGS_IPV4 BIT(1)
#define IDPF_TX_FLAGS_IPV6 BIT(2)
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
+#define IDPF_TX_FLAGS_TSYN BIT(4)
union idpf_tx_flex_desc {
struct idpf_flex_tx_desc q; /* queue based scheduling */
@@ -289,6 +290,8 @@ struct idpf_ptype_state {
* @__IDPF_Q_POLL_MODE: Enable poll mode
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
+ * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
+ * queue
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
@@ -299,6 +302,7 @@ enum idpf_queue_flags_t {
__IDPF_Q_POLL_MODE,
__IDPF_Q_CRC_EN,
__IDPF_Q_HSPLIT_EN,
+ __IDPF_Q_PTP,
__IDPF_Q_FLAGS_NBITS,
};
@@ -443,6 +447,7 @@ struct idpf_tx_queue_stats {
u64_stats_t q_busy;
u64_stats_t skb_drops;
u64_stats_t dma_map_errs;
+ u64_stats_t tstamp_skipped;
};
#define IDPF_ITR_DYNAMIC 1
@@ -494,6 +499,7 @@ struct idpf_txq_stash {
* @next_to_alloc: RX buffer to allocate at
* @skb: Pointer to the skb
* @truesize: data buffer truesize in singleq
+ * @cached_phc_time: Cached PHC time for the Rx queue
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_rx_queue_stats
* @q_id: Queue id
@@ -541,6 +547,7 @@ struct idpf_rx_queue {
struct sk_buff *skb;
u32 truesize;
+ u64 cached_phc_time;
struct u64_stats_sync stats_sync;
struct idpf_rx_queue_stats q_stats;
@@ -560,7 +567,7 @@ struct idpf_rx_queue {
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
- 80 + sizeof(struct u64_stats_sync),
+ 88 + sizeof(struct u64_stats_sync),
32);
/**
@@ -617,6 +624,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
+ * @tstamp_task: Work that handles Tx timestamp read
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_tx_queue_stats
* @q_id: Queue id
@@ -630,7 +639,7 @@ struct idpf_tx_queue {
struct idpf_base_tx_desc *base_tx;
struct idpf_base_tx_ctx_desc *base_ctx;
union idpf_tx_flex_desc *flex_tx;
- struct idpf_flex_tx_ctx_desc *flex_ctx;
+ union idpf_flex_tx_ctx_desc *flex_ctx;
void *desc_ring;
};
@@ -666,6 +675,9 @@ struct idpf_tx_queue {
u16 compl_tag_cur_gen;
u16 compl_tag_gen_max;
+ struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
+ struct work_struct *tstamp_task;
+
struct u64_stats_sync stats_sync;
struct idpf_tx_queue_stats q_stats;
__cacheline_group_end_aligned(read_write);
@@ -679,7 +691,7 @@ struct idpf_tx_queue {
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 88 + sizeof(struct u64_stats_sync),
+ 112 + sizeof(struct u64_stats_sync),
24);
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 3d2413b8684f..07a9f5ae34fd 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -5,88 +5,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
-
-#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
-#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
-#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
-#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
-#define IDPF_VC_XN_RING_LEN U8_MAX
-
-/**
- * enum idpf_vc_xn_state - Virtchnl transaction status
- * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
- * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
- * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
- * buffer updated
- * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
- * was an error, buffer not updated
- * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
- * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
- * return context; a callback may be provided to handle
- * return
- */
-enum idpf_vc_xn_state {
- IDPF_VC_XN_IDLE = 1,
- IDPF_VC_XN_WAITING,
- IDPF_VC_XN_COMPLETED_SUCCESS,
- IDPF_VC_XN_COMPLETED_FAILED,
- IDPF_VC_XN_SHUTDOWN,
- IDPF_VC_XN_ASYNC,
-};
-
-struct idpf_vc_xn;
-/* Callback for asynchronous messages */
-typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
- const struct idpf_ctlq_msg *);
-
-/**
- * struct idpf_vc_xn - Data structure representing virtchnl transactions
- * @completed: virtchnl event loop uses that to signal when a reply is
- * available, uses kernel completion API
- * @state: virtchnl event loop stores the data below, protected by the
- * completion's lock.
- * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
- * truncated on its way to the receiver thread according to
- * reply_buf.iov_len.
- * @reply: Reference to the buffer(s) where the reply data should be written
- * to. May be 0-length (then NULL address permitted) if the reply data
- * should be ignored.
- * @async_handler: if sent asynchronously, a callback can be provided to handle
- * the reply when it's received
- * @vc_op: corresponding opcode sent with this transaction
- * @idx: index used as retrieval on reply receive, used for cookie
- * @salt: changed every message to make unique, used for cookie
- */
-struct idpf_vc_xn {
- struct completion completed;
- enum idpf_vc_xn_state state;
- size_t reply_sz;
- struct kvec reply;
- async_vc_cb async_handler;
- u32 vc_op;
- u8 idx;
- u8 salt;
-};
-
-/**
- * struct idpf_vc_xn_params - Parameters for executing transaction
- * @send_buf: kvec for send buffer
- * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
- * @timeout_ms: timeout to wait for reply
- * @async: send message asynchronously, will not wait on completion
- * @async_handler: If sent asynchronously, optional callback handler. The user
- * must be careful when using async handlers as the memory for
- * the recv_buf _cannot_ be on stack if this is async.
- * @vc_op: virtchnl op to send
- */
-struct idpf_vc_xn_params {
- struct kvec send_buf;
- struct kvec recv_buf;
- int timeout_ms;
- bool async;
- async_vc_cb async_handler;
- u32 vc_op;
-};
+#include "idpf_ptp.h"
/**
* struct idpf_vc_xn_manager - Manager for tracking transactions
@@ -235,6 +154,55 @@ err_kfree:
return err;
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * idpf_ptp_is_mb_msg - Check if the message is PTP-related
+ * @op: virtchnl opcode
+ *
+ * Return: true if msg is PTP-related, false otherwise.
+ */
+static bool idpf_ptp_is_mb_msg(u32 op)
+{
+ switch (op) {
+ case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_CROSS_TIME:
+ case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE:
+ case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS:
+ case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_prepare_ptp_mb_msg - Prepare PTP related message
+ *
+ * @adapter: Driver specific private structure
+ * @op: virtchnl opcode
+ * @ctlq_msg: Corresponding control queue message
+ */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ /* If the message is PTP-related and the secondary mailbox is available,
+ * send the message through the secondary mailbox.
+ */
+ if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid)
+ return;
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv;
+ ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id;
+ ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id;
+}
+#else /* !CONFIG_PTP_1588_CLOCK */
+static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
+ struct idpf_ctlq_msg *ctlq_msg)
+{ }
+#endif /* CONFIG_PTP_1588_CLOCK */
+
/**
* idpf_send_mb_msg - Send message over mailbox
* @adapter: Driver specific private structure
@@ -278,6 +246,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
ctlq_msg->func_id = 0;
+
+ idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg);
+
ctlq_msg->data_len = msg_size;
ctlq_msg->cookie.mbx.chnl_opcode = op;
ctlq_msg->cookie.mbx.chnl_retval = 0;
@@ -449,8 +420,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
* >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for
* error.
*/
-static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
- const struct idpf_vc_xn_params *params)
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
{
const struct kvec *send_buf = &params->send_buf;
struct idpf_vc_xn *xn;
@@ -900,7 +871,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_MACFILTER |
VIRTCHNL2_CAP_SPLITQ_QSCHED |
VIRTCHNL2_CAP_PROMISC |
- VIRTCHNL2_CAP_LOOPBACK);
+ VIRTCHNL2_CAP_LOOPBACK |
+ VIRTCHNL2_CAP_PTP);
xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
xn_params.send_buf.iov_base = &caps;
@@ -3029,6 +3001,11 @@ restart:
goto err_intr_req;
}
+ err = idpf_ptp_init(adapter);
+ if (err)
+ pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
+ ERR_PTR(err));
+
idpf_init_avail_queues(adapter);
/* Skew the delay for init tasks for each function based on fn number
@@ -3091,6 +3068,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
if (!remove_in_prog)
idpf_vc_xn_shutdown(adapter->vcxn_mngr);
+ idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
@@ -3158,6 +3136,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
u16 rx_itr[] = {2, 8, 32, 96, 128};
struct idpf_rss_data *rss_data;
u16 idx = vport->idx;
+ int err;
vport_config = adapter->vport_config[idx];
rss_data = &vport_config->user_config.rss_data;
@@ -3192,6 +3171,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_alloc_vec_indexes(vport);
vport->crc_enable = adapter->crc_enable;
+
+ if (!(vport_msg->vport_flags &
+ cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
+ return;
+
+ err = idpf_ptp_get_vport_tstamps_caps(vport);
+ if (err) {
+ pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
+ return;
+ }
+
+ INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 83da5d8da56b..3522c1238ea2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -4,6 +4,88 @@
#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer
+ * updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ * available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
+ * truncated on its way to the receiver thread according to
+ * reply_buf.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used as retrieval on reply receive, used for cookie
+ * @salt: changed every message to make unique, used for cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
struct idpf_adapter;
struct idpf_netdev_priv;
struct idpf_vec_regs;
@@ -11,6 +93,8 @@ struct idpf_vport;
struct idpf_vport_max_q;
struct idpf_vport_user_config_data;
+ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params);
int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
new file mode 100644
index 000000000000..bdcc54a5fb56
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
+
+/**
+ * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl get PTP capabilities message.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
+ struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
+ .caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
+ .send_buf.iov_base = &send_ptp_caps_msg,
+ .send_buf.iov_len = sizeof(send_ptp_caps_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+ struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
+ struct idpf_ptp_secondary_mbx *scnd_mbx;
+ struct idpf_ptp *ptp = adapter->ptp;
+ enum idpf_ptp_access access_type;
+ u32 temp_offset;
+ int reply_sz;
+
+ recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),
+ GFP_KERNEL);
+ if (!recv_ptp_caps_msg)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
+ xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ else if (reply_sz != sizeof(*recv_ptp_caps_msg))
+ return -EIO;
+
+ ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
+ ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
+ ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);
+
+ scnd_mbx = &ptp->secondary_mbx;
+ scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);
+
+ /* If peer_mbx_q_id holds the invalid value (0xffff), the secondary
+ * mailbox is not supported.
+ */
+ scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff;
+ if (scnd_mbx->valid)
+ scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id;
+
+ /* Determine the access type for the PTP features */
+ idpf_ptp_get_features_access(adapter);
+
+ access_type = ptp->get_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto discipline_clock;
+
+ clock_offsets = recv_ptp_caps_msg->clk_offsets;
+
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l);
+ ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h);
+ ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l);
+ ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h);
+ ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
+discipline_clock:
+ access_type = ptp->adj_dev_clk_time_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ return 0;
+
+ clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets;
+
+ /* Device clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type);
+ ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l);
+ ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h);
+ ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l);
+ ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h);
+ ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ /* PHY clock offsets */
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type);
+ ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l);
+ ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h);
+ ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l);
+ ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset);
+ temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h);
+ ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message
+ * @adapter: Driver specific private structure
+ * @dev_clk_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get time message to get the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *dev_clk_time)
+{
+ struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME,
+ .send_buf.iov_base = &get_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .recv_buf.iov_base = &get_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(get_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+ u64 dev_time;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(get_dev_clk_time_msg))
+ return -EIO;
+
+ dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns);
+ dev_clk_time->dev_clk_time_ns = dev_time;
+
+ return 0;
+}
+
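For context, a hedged sketch of how a PTP gettime path might consume this helper; the driver's real .gettimex64 callback lives in idpf_ptp.c, outside this hunk, and the example_* name is an assumption.

/* Sketch only: turn the mailbox-read device clock value into a timespec64. */
static int example_ptp_gettime(struct idpf_adapter *adapter,
			       struct timespec64 *ts)
{
	struct idpf_ptp_dev_timers dev_times;
	int err;

	err = idpf_ptp_get_dev_clk_time(adapter, &dev_times);
	if (err)
		return err;

	*ts = ns_to_timespec64(dev_times.dev_clk_time_ns);

	return 0;
}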
+/**
+ * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
+ * @adapter: Driver specific private structure
+ * @time: New time value
+ *
+ * Send virtchnl set time message to set the time of the clock.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time)
+{
+ struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = {
+ .dev_time_ns = cpu_to_le64(time),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME,
+ .send_buf.iov_base = &set_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .recv_buf.iov_base = &set_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(set_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(set_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message
+ * @adapter: Driver specific private structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Send virtchnl adj time message to adjust the clock by the indicated delta.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta)
+{
+ struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = {
+ .delta = cpu_to_le64(delta),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME,
+ .send_buf.iov_base = &adj_dev_clk_time_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .recv_buf.iov_base = &adj_dev_clk_time_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_time_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj dev clk fine message
+ * @adapter: Driver specific private structure
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Send virtchnl adj fine message to adjust the frequency of the clock by
+ * incval.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval)
+{
+ struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = {
+ .incval = cpu_to_le64(incval),
+ };
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE,
+ .send_buf.iov_base = &adj_dev_clk_fine_msg,
+ .send_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .recv_buf.iov_base = &adj_dev_clk_fine_msg,
+ .recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(adj_dev_clk_fine_msg))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_vport_tstamps_caps - Send virtchnl message to get tstamp caps for vport
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get vport tstamps caps message to receive the set of tstamp
+ * capabilities per vport.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps;
+ struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps;
+ struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps;
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS,
+ .send_buf.iov_base = &send_tx_tstamp_caps,
+ .send_buf.iov_len = sizeof(send_tx_tstamp_caps),
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ enum idpf_ptp_access tstamp_access, get_dev_clk_access;
+ struct idpf_ptp *ptp = vport->adapter->ptp;
+ struct list_head *head;
+ int err = 0, reply_sz;
+ u16 num_latches;
+ u32 size;
+
+ if (!ptp)
+ return -EOPNOTSUPP;
+
+ tstamp_access = ptp->tx_tstamp_access;
+ get_dev_clk_access = ptp->get_dev_clk_time_access;
+ if (tstamp_access == IDPF_PTP_NONE ||
+ get_dev_clk_access == IDPF_PTP_NONE)
+ return -EOPNOTSUPP;
+
+ rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcv_tx_tstamp_caps)
+ return -ENOMEM;
+
+ send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id);
+ xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto get_tstamp_caps_out;
+ }
+
+ num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches);
+ size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches);
+ if (reply_sz != size) {
+ err = -EIO;
+ goto get_tstamp_caps_out;
+ }
+
+ size = struct_size(tstamp_caps, tx_tstamp_status, num_latches);
+ tstamp_caps = kzalloc(size, GFP_KERNEL);
+ if (!tstamp_caps) {
+ err = -ENOMEM;
+ goto get_tstamp_caps_out;
+ }
+
+ tstamp_caps->access = true;
+ tstamp_caps->num_entries = num_latches;
+
+ INIT_LIST_HEAD(&tstamp_caps->latches_in_use);
+ INIT_LIST_HEAD(&tstamp_caps->latches_free);
+
+ spin_lock_init(&tstamp_caps->latches_lock);
+ spin_lock_init(&tstamp_caps->status_lock);
+
+ tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ for (u16 i = 0; i < tstamp_caps->num_entries; i++) {
+ __le32 offset_l, offset_h;
+
+ ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL);
+ if (!ptp_tx_tstamp) {
+ err = -ENOMEM;
+ goto err_free_ptp_tx_stamp_list;
+ }
+
+ tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i];
+
+ if (tstamp_access != IDPF_PTP_DIRECT)
+ goto skip_offsets;
+
+ offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l;
+ offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h;
+ ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l);
+ ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h);
+
+skip_offsets:
+ ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index;
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tstamp_caps->latches_free);
+
+ tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE;
+ }
+
+ vport->tx_tstamp_caps = tstamp_caps;
+ kfree(rcv_tx_tstamp_caps);
+
+ return 0;
+
+err_free_ptp_tx_stamp_list:
+ head = &tstamp_caps->latches_free;
+ list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ list_del(&ptp_tx_tstamp->list_member);
+ kfree(ptp_tx_tstamp);
+ }
+
+ kfree(tstamp_caps);
+get_tstamp_caps_out:
+ kfree(rcv_tx_tstamp_caps);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on
+ * the skb compatibility.
+ * @caps: Tx timestamp capabilities that monitor the latch status
+ * @skb: skb for which the tstamp value is returned through virtchnl message
+ * @current_state: Current state of the Tx timestamp latch
+ * @expected_state: Expected state of the Tx timestamp latch
+ *
+ * Find the skb tracker for which the Tx timestamp was received and change
+ * its state to the expected value.
+ *
+ * Return: true if the tracker has been found and updated, false otherwise.
+ */
+static bool
+idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps,
+ struct sk_buff *skb,
+ enum idpf_ptp_tx_tstamp_state current_state,
+ enum idpf_ptp_tx_tstamp_state expected_state)
+{
+ bool updated = false;
+
+ spin_lock(&caps->status_lock);
+ for (u16 i = 0; i < caps->num_entries; i++) {
+ struct idpf_ptp_tx_tstamp_status *status;
+
+ status = &caps->tx_tstamp_status[i];
+
+ if (skb == status->skb && status->state == current_state) {
+ status->state = expected_state;
+ updated = true;
+ break;
+ }
+ }
+ spin_unlock(&caps->status_lock);
+
+ return updated;
+}
+
+/**
+ * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it
+ * back to the skb.
+ * @vport: Virtual port structure
+ * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane
+ * @ptp_tx_tstamp: Tx timestamp latch to add to the free list
+ *
+ * Read the value of the Tx timestamp for a given latch received from the
+ * Control Plane, extend it to 64 bits and provide it back to the skb.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
+ struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch,
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp)
+{
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct skb_shared_hwtstamps shhwtstamps;
+ bool state_upd = false;
+ u8 tstamp_ns_lo_bit;
+ u64 tstamp;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit;
+
+ ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp);
+ ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_READ_VALUE,
+ IDPF_PTP_FREE);
+ if (!state_upd)
+ return -EINVAL;
+
+ tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp);
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
+ consume_skb(ptp_tx_tstamp->skb);
+
+ list_add(&ptp_tx_tstamp->list_member,
+ &tx_tstamp_caps->latches_free);
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps
+ * @adapter: Driver specific private structure
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * Read the Tx tstamp values from a received message and put them
+ * directly into the skb. The number of timestamps to read is specified by
+ * the virtchnl message.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch;
+ struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp;
+ struct idpf_vport *tstamp_vport = NULL;
+ struct list_head *head;
+ u16 num_latches;
+ u32 vport_id;
+ int err = 0;
+
+ recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va;
+ vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id);
+
+ idpf_for_each_vport(adapter, vport) {
+ if (!vport)
+ continue;
+
+ if (vport->vport_id == vport_id) {
+ tstamp_vport = vport;
+ break;
+ }
+ }
+
+ if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps)
+ return -EINVAL;
+
+ tx_tstamp_caps = tstamp_vport->tx_tstamp_caps;
+ num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches);
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ head = &tx_tstamp_caps->latches_in_use;
+
+ for (u16 i = 0; i < num_latches; i++) {
+ tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i];
+
+ if (!tstamp_latch.valid)
+ continue;
+
+ if (list_empty(head)) {
+ err = -ENOBUFS;
+ goto unlock;
+ }
+
+ list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) {
+ if (tstamp_latch.index == tx_tstamp->idx) {
+ list_del(&tx_tstamp->list_member);
+ err = idpf_ptp_get_tstamp_value(tstamp_vport,
+ &tstamp_latch,
+ tx_tstamp);
+ if (err)
+ goto unlock;
+
+ break;
+ }
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message
+ * @vport: Virtual port structure
+ *
+ * Send virtchnl get Tx tstamp message to read the value of the HW timestamp.
+ * The message contains a list of indexes set in the Tx descriptors.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+ struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg;
+ struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ .async = true,
+ .async_handler = idpf_ptp_get_tx_tstamp_async_handler,
+ };
+ struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+ int reply_sz, size, msg_size;
+ struct list_head *head;
+ bool state_upd;
+ u16 id = 0;
+
+ tx_tstamp_caps = vport->tx_tstamp_caps;
+ head = &tx_tstamp_caps->latches_in_use;
+
+ size = struct_size(send_tx_tstamp_msg, tstamp_latches,
+ tx_tstamp_caps->num_entries);
+ send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL);
+ if (!send_tx_tstamp_msg)
+ return -ENOMEM;
+
+ spin_lock_bh(&tx_tstamp_caps->latches_lock);
+ list_for_each_entry(ptp_tx_tstamp, head, list_member) {
+ u8 idx;
+
+ state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps,
+ ptp_tx_tstamp->skb,
+ IDPF_PTP_REQUEST,
+ IDPF_PTP_READ_VALUE);
+ if (!state_upd)
+ continue;
+
+ idx = ptp_tx_tstamp->idx;
+ send_tx_tstamp_msg->tstamp_latches[id].index = idx;
+ id++;
+ }
+ spin_unlock_bh(&tx_tstamp_caps->latches_lock);
+
+ msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id);
+ send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id);
+ send_tx_tstamp_msg->num_latches = cpu_to_le16(id);
+ xn_params.send_buf.iov_base = send_tx_tstamp_msg;
+ xn_params.send_buf.iov_len = msg_size;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ kfree(send_tx_tstamp_msg);
+
+ return min(reply_sz, 0);
+}
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 63deb120359c..11b8f6f05799 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -68,6 +68,16 @@ enum virtchnl2_op {
VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
+
+ /* TimeSync opcodes */
+ VIRTCHNL2_OP_PTP_GET_CAPS = 541,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP = 542,
+ VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME = 543,
+ VIRTCHNL2_OP_PTP_GET_CROSS_TIME = 544,
+ VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME = 545,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546,
+ VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
+ VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
};
/**
@@ -560,6 +570,14 @@ struct virtchnl2_queue_reg_chunks {
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
/**
+ * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
+ * @VIRTCHNL2_VPORT_UPLINK_PORT: Representative of an underlying physical port
+ */
+enum virtchnl2_vport_flags {
+ VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+};
+
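A small illustrative helper (hypothetical name) showing how the flag pairs with the vport_flags field added to struct virtchnl2_create_vport below:

/* Sketch only: test whether a created vport represents an uplink port. */
static inline bool
example_vport_is_uplink(const struct virtchnl2_create_vport *vport)
{
	return le16_to_cpu(vport->vport_flags) & VIRTCHNL2_VPORT_UPLINK_PORT;
}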
+/**
* struct virtchnl2_create_vport - Create vport config info.
* @vport_type: See enum virtchnl2_vport_type.
* @txq_model: See virtchnl2_queue_model.
@@ -577,7 +595,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
* @max_mtu: Max MTU. CP populates this field on response.
* @vport_id: Vport id. CP populates this field on response.
* @default_mac_addr: Default MAC address.
- * @pad: Padding.
+ * @vport_flags: See enum virtchnl2_vport_flags.
* @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
* @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
* @pad1: Padding.
@@ -610,7 +628,7 @@ struct virtchnl2_create_vport {
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[ETH_ALEN];
- __le16 pad;
+ __le16 vport_flags;
__le64 rx_desc_ids;
__le64 tx_desc_ids;
u8 pad1[72];
@@ -1270,4 +1288,296 @@ struct virtchnl2_promisc_info {
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
+/**
+ * enum virtchnl2_ptp_caps - PTP capabilities
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
+ * clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
+ * device clock
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
+ *
+ * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
+ * There are two access methods - mailbox (_MB) and direct.
+ * PTP capabilities enable Main Timer operations: get/set/adjust Main Timer,
+ * cross timestamping and Tx timestamping.
+ */
+enum virtchnl2_ptp_caps {
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME = BIT(0),
+ VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB = BIT(1),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME = BIT(2),
+ VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB = BIT(3),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME = BIT(4),
+ VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB = BIT(5),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK = BIT(6),
+ VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB = BIT(7),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS = BIT(8),
+ VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB = BIT(9),
+};
+
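Because each feature is advertised as a direct/mailbox pair, classifying the access method per feature reduces to two bit tests. A hedged sketch follows (the example_* names are assumptions; the driver's real classification is done by idpf_ptp_get_features_access(), called earlier in this patch):

/* Sketch only: derive the device-clock read access method from the
 * negotiated capability bitmap.
 */
enum example_ptp_access {
	EXAMPLE_PTP_NONE,
	EXAMPLE_PTP_MAILBOX,
	EXAMPLE_PTP_DIRECT,
};

static enum example_ptp_access example_dev_clk_access(u32 caps)
{
	if (caps & VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME)
		return EXAMPLE_PTP_DIRECT;
	if (caps & VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB)
		return EXAMPLE_PTP_MAILBOX;

	return EXAMPLE_PTP_NONE;
}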
+/**
+ * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clock
+ * registers.
+ * @dev_clk_ns_l: Device clock low register offset
+ * @dev_clk_ns_h: Device clock high register offset
+ * @phy_clk_ns_l: PHY clock low register offset
+ * @phy_clk_ns_h: PHY clock high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_clk_reg_offsets {
+ __le32 dev_clk_ns_l;
+ __le32 dev_clk_ns_h;
+ __le32 phy_clk_ns_l;
+ __le32 phy_clk_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
+ * time registers.
+ * @sys_time_ns_l: System time low register offset
+ * @sys_time_ns_h: System time high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_cross_time_reg_offsets {
+ __le32 sys_time_ns_l;
+ __le32 sys_time_ns_h;
+ __le32 cmd_sync_trigger;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clock
+ * adjustment registers.
+ * @dev_clk_cmd_type: Device clock command type register offset
+ * @dev_clk_incval_l: Device clock increment value low register offset
+ * @dev_clk_incval_h: Device clock increment value high register offset
+ * @dev_clk_shadj_l: Device clock shadow adjust low register offset
+ * @dev_clk_shadj_h: Device clock shadow adjust high register offset
+ * @phy_clk_cmd_type: PHY timer command type register offset
+ * @phy_clk_incval_l: PHY timer increment value low register offset
+ * @phy_clk_incval_h: PHY timer increment value high register offset
+ * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
+ * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
+ */
+struct virtchnl2_ptp_clk_adj_reg_offsets {
+ __le32 dev_clk_cmd_type;
+ __le32 dev_clk_incval_l;
+ __le32 dev_clk_incval_h;
+ __le32 dev_clk_shadj_l;
+ __le32 dev_clk_shadj_h;
+ __le32 phy_clk_cmd_type;
+ __le32 phy_clk_incval_l;
+ __le32 phy_clk_incval_h;
+ __le32 phy_clk_shadj_l;
+ __le32 phy_clk_shadj_h;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
+ * capabilities.
+ * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
+ * @index: Latch index provided to the Tx descriptor
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch_caps {
+ __le32 tx_latch_reg_offset_l;
+ __le32 tx_latch_reg_offset_h;
+ u8 index;
+ u8 pad[7];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
+ * tstamp entries.
+ * @vport_id: Vport number
+ * @num_latches: Total number of latches
+ * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
+ * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
+ * @pad: Padding for future tstamp granularity extensions
+ * @tstamp_latches: Capabilities of Tx timestamp entries
+ *
+ * PF/VF sends this message to negotiate the Tx timestamp latches for each
+ * Vport.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 tstamp_ns_lo_bit;
+ u8 tstamp_ns_hi_bit;
+ u8 pad[8];
+
+ struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
+
+/**
+ * struct virtchnl2_ptp_get_caps - Get PTP capabilities
+ * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
+ * @max_adj: The maximum possible frequency adjustment
+ * @base_incval: The default timer increment value
+ * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @secondary_mbx: Indicates to the driver that it should create a secondary
+ * mailbox to interact with the control plane for PTP
+ * @pad: Padding for future extensions
+ * @clk_offsets: Main timer and PHY registers offsets
+ * @cross_time_offsets: Cross time registers offsets
+ * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
+ *
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates the
+ * bitmap with supported features and fills in the appropriate structures.
+ * If HW uses the primary MBX for PTP, secondary_mbx is set to false.
+ * If HW uses a secondary MBX for PTP, secondary_mbx is set to true: the
+ * control plane has 2 MBXs while the driver has 1, and the send-to-peer-driver
+ * mechanism may be used to send messages using the valid peer_mbx_q_id and
+ * peer_id.
+ * If HW does not use send-to-peer-driver, secondary_mbx is a don't-care field
+ * and peer_mbx_q_id holds the invalid value (0xFFFF).
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
+ */
+struct virtchnl2_ptp_get_caps {
+ __le32 caps;
+ __le32 max_adj;
+ __le64 base_incval;
+ __le16 peer_mbx_q_id;
+ u8 peer_id;
+ u8 secondary_mbx;
+ u8 pad[4];
+
+ struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
+ struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes a Tx tstamp
+ * value, its index and validity.
+ * @tstamp: Timestamp value
+ * @index: Timestamp index from which the value is read
+ * @valid: Timestamp validity
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch {
+ __le64 tstamp;
+ u8 index;
+ u8 valid;
+ u8 pad[6];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
+ * associated with the vport.
+ * @vport_id: Number of vport that requests the timestamp
+ * @num_latches: Number of latches
+ * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
+ * @pad: Padding for future extensions
+ * @device_time: device time if get_devtime_with_txtstmp was set in request
+ * @tstamp_latches: PTP TX timestamp latch
+ *
+ * PF/VF sends this message to receive a specified number of timestamps
+ * entries.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
+ __le32 vport_id;
+ __le16 num_latches;
+ u8 get_devtime_with_txtstmp;
+ u8 pad[1];
+ __le64 device_time;
+
+ struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
+ __counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
+
+/**
+ * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
+ * VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
+ * @dev_time_ns: Device clock time value in nanoseconds
+ *
+ * PF/VF sends this message to receive the time from the main timer.
+ */
+struct virtchnl2_ptp_get_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_get_cross_time - Associated with message
+ * VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
+ * @sys_time_ns: System counter value expressed in nanoseconds, read
+ * synchronously with device time
+ * @dev_time_ns: Device clock time value expressed in nanoseconds
+ *
+ * PF/VF sends this message to receive the cross time.
+ */
+struct virtchnl2_ptp_get_cross_time {
+ __le64 sys_time_ns;
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
+
+/**
+ * struct virtchnl2_ptp_set_dev_clk_time - Associated with message
+ * VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
+ * @dev_time_ns: Device time value expressed in nanoseconds to set
+ *
+ * PF/VF sends this message to set the time of the main timer.
+ */
+struct virtchnl2_ptp_set_dev_clk_time {
+ __le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_fine - Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
+ * @incval: Source timer increment value per clock cycle
+ *
+ * PF/VF sends this message to adjust the frequency of the main timer by the
+ * indicated increment value.
+ */
+struct virtchnl2_ptp_adj_dev_clk_fine {
+ __le64 incval;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_time - Associated with message
+ * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME.
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * PF/VF sends this message to adjust the time of the main timer by the delta.
+ */
+struct virtchnl2_ptp_adj_dev_clk_time {
+ __le64 delta;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
+
#endif /* _VIRTCHNL_2_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 02f340280d20..f34ead8243e9 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -391,7 +391,8 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG,
- IGB_RING_FLAG_TX_DISABLED
+ IGB_RING_FLAG_TX_DISABLED,
+ IGB_RING_FLAG_RX_ALLOC_FAILED,
};
#define ring_uses_large_buffer(ring) \
@@ -722,6 +723,8 @@ enum igb_boards {
extern char igb_driver_name[];
+void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx,
+ struct napi_struct *napi);
int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *ring,
struct xdp_frame *xdpf);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c646c71915f0..9e9a5900e6e5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -947,6 +947,9 @@ static int igb_request_msix(struct igb_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igb_configure_msix(adapter);
@@ -1194,7 +1197,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
+ netif_napi_add_config(adapter->netdev, &q_vector->napi, igb_poll,
+ v_idx);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -2096,6 +2100,22 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
+void igb_set_queue_napi(struct igb_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -2103,6 +2123,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
int igb_up(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i;
/* hardware has been reset, we need to reload some things */
@@ -2110,8 +2131,11 @@ int igb_up(struct igb_adapter *adapter)
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
@@ -2181,6 +2205,7 @@ void igb_down(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igb_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
@@ -4113,8 +4138,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err;
int i;
@@ -4166,8 +4192,11 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&(adapter->q_vector[i]->napi));
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igb_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(E1000_TSICR);
@@ -5726,11 +5755,29 @@ no_wait:
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
- for (i = 0; i < adapter->num_q_vectors; i++)
- eics |= adapter->q_vector[i]->eims_value;
- wr32(E1000_EICS, eics);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+ struct igb_ring *rx_ring;
+
+ if (!q_vector->rx.ring)
+ continue;
+
+ rx_ring = adapter->rx_ring[q_vector->rx.ring->queue_index];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ eics |= q_vector->eims_value;
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ }
+ }
+ if (eics)
+ wr32(E1000_EICS, eics);
} else {
- wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ struct igb_ring *rx_ring = adapter->rx_ring[0];
+
+ if (test_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags)) {
+ clear_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
}
igb_spoof_check(adapter);
@@ -9061,6 +9108,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
@@ -9120,6 +9168,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9136,6 +9185,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
__free_pages(page, igb_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
return false;
}
@@ -9674,8 +9724,11 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
+ rtnl_lock();
if (netif_running(netdev))
igb_down(adapter);
+ rtnl_unlock();
+
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -9734,16 +9787,21 @@ static void igb_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
if (netif_running(netdev)) {
if (!test_bit(__IGB_DOWN, &adapter->state)) {
dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
+ rtnl_unlock();
return;
}
+
if (igb_up(adapter)) {
dev_err(&pdev->dev, "igb_up failed after reset\n");
+ rtnl_unlock();
return;
}
}
+ rtnl_unlock();
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f323e1c1989f..793c96016288 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -502,13 +502,6 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Both the rising and falling edge are timestamped */
if (rq->extts.flags & PTP_STRICT_FLAGS &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -658,13 +651,6 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -1356,6 +1342,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
@@ -1378,6 +1367,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
adapter->ptp_caps.n_pins = IGB_N_SDP;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
index 157d43787fa0..5cf67ba29269 100644
--- a/drivers/net/ethernet/intel/igb/igb_xsk.c
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -415,6 +415,7 @@ int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_failed++;
+ set_bit(IGB_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
break;
}
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 2f265c0959c7..859a15e4ccba 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -40,6 +40,11 @@ void igc_ethtool_set_ops(struct net_device *);
#define IGC_MAX_TX_TSTAMP_REGS 4
+struct igc_fpe_t {
+ struct ethtool_mmsv mmsv;
+ u32 tx_min_frag_size;
+};
+
enum igc_mac_filter_type {
IGC_MAC_FILTER_TYPE_DST = 0,
IGC_MAC_FILTER_TYPE_SRC
@@ -333,6 +338,8 @@ struct igc_adapter {
struct timespec64 period;
} perout[IGC_N_PEROUT];
+ struct igc_fpe_t fpe;
+
/* LEDs */
struct mutex led_mutex;
struct igc_led_classdev *leds;
@@ -387,11 +394,11 @@ extern char igc_driver_name[];
#define IGC_FLAG_RX_LEGACY BIT(16)
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
-#define IGC_FLAG_TSN_LEGACY_ENABLED BIT(19)
+#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
- IGC_FLAG_TSN_LEGACY_ENABLED)
+ IGC_FLAG_TSN_PREEMPT_ENABLED)
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
@@ -736,7 +743,10 @@ struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
u32 location);
int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule);
-
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter);
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter);
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu);
+void igc_flush_tx_descriptors(struct igc_ring *ring);
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index bf8cdfbba9ff..6320eabb72fe 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -49,6 +49,7 @@ struct igc_adv_tx_context_desc {
#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IGC_ADVTXD_PAYLEN_MASK 0XFFFFC000 /* Adv desc PAYLEN mask */
#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IGC_RAR_ENTRIES 16
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index d19325b0e6e0..7189dfc389ad 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -308,6 +308,8 @@
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IGC_TXD_POPTS_SMD_MASK 0x3000 /* Indicates whether it's SMD-V or SMD-R */
+
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
@@ -363,6 +365,8 @@
#define IGC_SRRCTL_TIMER0SEL(timer) (((timer) & 0x3) << 17)
/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_SMD_TYPE_V 0x01 /* SMD-V Packet */
+#define IGC_RXD_STAT_SMD_TYPE_R 0x02 /* SMD-R Packet */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
@@ -372,7 +376,8 @@
#define IGC_RXDEXT_STATERR_LB 0x00040000
/* Advanced Receive Descriptor bit definitions */
-#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define IGC_RXDADV_STAT_SMD_TYPE_MASK 0x06000
+#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
@@ -396,11 +401,47 @@
#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
-#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
-#define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
-
-#define IGC_TXPBSIZE_TSN 0x04145145 /* 5k bytes buffer for each queue */
+/* Mask for RX packet buffer size */
+#define IGC_RXPBSIZE_EXP_MASK GENMASK(5, 0)
+#define IGC_BMC2OSPBSIZE_MASK GENMASK(11, 6)
+#define IGC_RXPBSIZE_BE_MASK GENMASK(17, 12)
+/* Mask for timestamp in RX buffer */
+#define IGC_RXPBS_CFG_TS_EN_MASK GENMASK(31, 31)
+/* High-priority RX packet buffer size (KB). Used for Express traffic when preemption is enabled */
+#define IGC_RXPBSIZE_EXP(x) FIELD_PREP(IGC_RXPBSIZE_EXP_MASK, (x))
+/* BMC to OS packet buffer size in KB */
+#define IGC_BMC2OSPBSIZE(x) FIELD_PREP(IGC_BMC2OSPBSIZE_MASK, (x))
+/* Low-priority RX packet buffer size (KB). Used for BE traffic when preemption is enabled */
+#define IGC_RXPBSIZE_BE(x) FIELD_PREP(IGC_RXPBSIZE_BE_MASK, (x))
+/* Enable RX packet buffer for timestamp descriptor, saving 16 bytes per packet if set */
+#define IGC_RXPBS_CFG_TS_EN FIELD_PREP(IGC_RXPBS_CFG_TS_EN_MASK, 1)
+/* Default value following I225/I226 SW User Manual Section 8.3.1 */
+#define IGC_RXPBSIZE_EXP_BMC_DEFAULT ( \
+ IGC_RXPBSIZE_EXP(34) | IGC_BMC2OSPBSIZE(2))
+#define IGC_RXPBSIZE_EXP_BMC_BE_TSN ( \
+ IGC_RXPBSIZE_EXP(15) | IGC_BMC2OSPBSIZE(2) | IGC_RXPBSIZE_BE(15))
+
+/* Mask for TX packet buffer size */
+#define IGC_TXPB0SIZE_MASK GENMASK(5, 0)
+#define IGC_TXPB1SIZE_MASK GENMASK(11, 6)
+#define IGC_TXPB2SIZE_MASK GENMASK(17, 12)
+#define IGC_TXPB3SIZE_MASK GENMASK(23, 18)
+/* Mask for OS to BMC packet buffer size */
+#define IGC_OS2BMCPBSIZE_MASK GENMASK(29, 24)
+/* TX Packet buffer size in KB */
+#define IGC_TXPB0SIZE(x) FIELD_PREP(IGC_TXPB0SIZE_MASK, (x))
+#define IGC_TXPB1SIZE(x) FIELD_PREP(IGC_TXPB1SIZE_MASK, (x))
+#define IGC_TXPB2SIZE(x) FIELD_PREP(IGC_TXPB2SIZE_MASK, (x))
+#define IGC_TXPB3SIZE(x) FIELD_PREP(IGC_TXPB3SIZE_MASK, (x))
+/* OS to BMC packet buffer size in KB */
+#define IGC_OS2BMCPBSIZE(x) FIELD_PREP(IGC_OS2BMCPBSIZE_MASK, (x))
+/* Default value following I225/I226 SW User Manual Section 8.3.2 */
+#define IGC_TXPBSIZE_DEFAULT ( \
+ IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \
+ IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4))
+#define IGC_TXPBSIZE_TSN ( \
+ IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \
+ IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4))
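The FIELD_PREP encodings above can be read back symmetrically with FIELD_GET; a tiny hedged sketch with a hypothetical helper name:

/* Sketch only: decode the Express RX buffer size (KB) from an RXPBS value. */
static inline u32 example_rxpbsize_exp_kb(u32 rxpbs)
{
	return FIELD_GET(IGC_RXPBSIZE_EXP_MASK, rxpbs);
}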
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
@@ -539,8 +580,10 @@
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
+#define IGC_TQAVCTRL_PREEMPT_ENA 0x00000002
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080
+#define IGC_TQAVCTRL_MIN_FRAG_MASK 0x0000C000
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 817838677817..3fc1eded9605 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -8,6 +8,7 @@
#include "igc.h"
#include "igc_diag.h"
+#include "igc_tsn.h"
/* forward declaration */
struct igc_stats {
@@ -1781,6 +1782,83 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return 0;
}
+static int igc_ethtool_get_mm(struct net_device *netdev,
+ struct ethtool_mm_state *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ ethtool_mmsv_get_mm(&fpe->mmsv, cmd);
+ cmd->tx_min_frag_size = fpe->tx_min_frag_size;
+ cmd->rx_min_frag_size = IGC_RX_MIN_FRAG_SIZE;
+
+ return 0;
+}
+
+static int igc_ethtool_set_mm(struct net_device *netdev,
+ struct ethtool_mm_cfg *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_fpe_t *fpe = &adapter->fpe;
+
+ fpe->tx_min_frag_size = igc_fpe_get_supported_frag_size(cmd->tx_min_frag_size);
+ if (fpe->tx_min_frag_size != cmd->tx_min_frag_size)
+ NL_SET_ERR_MSG_MOD(extack,
+ "tx-min-frag-size value set is unsupported. Rounded up to supported value (64, 128, 192, 256)");
+
+ if (fpe->mmsv.pmac_enabled != cmd->pmac_enabled) {
+ if (cmd->pmac_enabled)
+ static_branch_inc(&igc_fpe_enabled);
+ else
+ static_branch_dec(&igc_fpe_enabled);
+ }
+
+ ethtool_mmsv_set_mm(&fpe->mmsv, cmd);
+
+ return igc_tsn_offload_apply(adapter);
+}
+
+/**
+ * igc_ethtool_get_frame_ass_error - Get the frame assembly error count.
+ * @reg_value: Register value for IGC_PRMEXCPRCNT
+ * Return: The count of frame assembly errors.
+ */
+static u64 igc_ethtool_get_frame_ass_error(u32 reg_value)
+{
+ /* Out of order statistics */
+ u32 ooo_frame_cnt, ooo_frag_cnt;
+ u32 miss_frame_frag_cnt;
+
+ ooo_frame_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAME_CNT, reg_value);
+ ooo_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_OOO_FRAG_CNT, reg_value);
+ miss_frame_frag_cnt = FIELD_GET(IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT,
+ reg_value);
+
+ return ooo_frame_cnt + ooo_frag_cnt + miss_frame_frag_cnt;
+}
+
+static u64 igc_ethtool_get_frame_smd_error(u32 reg_value)
+{
+ return FIELD_GET(IGC_PRMEXCPRCNT_OOO_SMDC, reg_value);
+}
+
+static void igc_ethtool_get_mm_stats(struct net_device *dev,
+ struct ethtool_mm_stats *stats)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 reg_value;
+
+ reg_value = rd32(IGC_PRMEXCPRCNT);
+
+ stats->MACMergeFrameAssErrorCount = igc_ethtool_get_frame_ass_error(reg_value);
+ stats->MACMergeFrameSmdErrorCount = igc_ethtool_get_frame_smd_error(reg_value);
+ stats->MACMergeFrameAssOkCount = rd32(IGC_PRMPTDRCNT);
+ stats->MACMergeFragCountRx = rd32(IGC_PRMEVNTRCNT);
+ stats->MACMergeFragCountTx = rd32(IGC_PRMEVNTTCNT);
+}
+
static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -2076,6 +2154,9 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_link_ksettings = igc_ethtool_get_link_ksettings,
.set_link_ksettings = igc_ethtool_set_link_ksettings,
.self_test = igc_ethtool_diag_test,
+ .get_mm = igc_ethtool_get_mm,
+ .get_mm_stats = igc_ethtool_get_mm_stats,
+ .set_mm = igc_ethtool_set_mm,
};
void igc_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index b1669d7cf435..27575a1e1777 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2464,8 +2464,7 @@ unmap:
return -ENOMEM;
}
-static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
- int cpu)
+struct igc_ring *igc_get_tx_ring(struct igc_adapter *adapter, int cpu)
{
int index = cpu;
@@ -2489,7 +2488,7 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
if (unlikely(!xdpf))
return -EFAULT;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2549,7 +2548,7 @@ out:
}
/* This function assumes __netif_tx_lock is held by the caller. */
-static void igc_flush_tx_descriptors(struct igc_ring *ring)
+void igc_flush_tx_descriptors(struct igc_ring *ring)
{
/* Once tail pointer is updated, hardware can fetch the descriptors
* any time so we issue a write membar here to ensure all memory
@@ -2566,7 +2565,7 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
struct igc_ring *ring;
if (status & IGC_XDP_TX) {
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -2638,6 +2637,14 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
size -= IGC_TS_HDR_LEN;
}
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf)) {
+ /* Advance the ring next-to-clean */
+ igc_is_non_eop(rx_ring, rx_desc);
+ cleaned_count++;
+ continue;
+ }
+
if (!skb) {
xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
@@ -3145,6 +3152,11 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
break;
+ if (igc_fpe_is_pmac_enabled(adapter) &&
+ igc_fpe_transmitted_smd_v(tx_desc))
+ ethtool_mmsv_event_handle(&adapter->fpe.mmsv,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
+
/* Hold the completions while there's a pending tx hardware
* timestamp request from XDP Tx metadata.
*/
@@ -4037,6 +4049,30 @@ static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
}
/**
+ * igc_enable_empty_addr_recv - Enable Rx of packets with all-zeroes MAC address
+ * @adapter: Pointer to the igc_adapter structure.
+ *
+ * Frame preemption verification requires that packets with the all-zeroes
+ * MAC address are allowed to be received by the driver. This function adds the
+ * all-zeroes destination address to the list of acceptable addresses.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+int igc_enable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty, -1);
+}
+
+void igc_disable_empty_addr_recv(struct igc_adapter *adapter)
+{
+ u8 empty[ETH_ALEN] = {};
+
+ igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, empty);
+}
+
+/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -5311,6 +5347,9 @@ void igc_down(struct igc_adapter *adapter)
igc_disable_all_tx_rings_hw(adapter);
igc_clean_all_tx_rings(adapter);
igc_clean_all_rx_rings(adapter);
+
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_stop(&adapter->fpe.mmsv);
}
void igc_reinit_locked(struct igc_adapter *adapter)
@@ -5835,6 +5874,10 @@ static void igc_watchdog_task(struct work_struct *work)
*/
igc_tsn_adjust_txtime_offset(adapter);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ true);
+
if (adapter->link_speed != SPEED_1000)
goto no_wait;
@@ -5870,6 +5913,10 @@ no_wait:
netdev_info(netdev, "NIC Link is Down\n");
netif_carrier_off(netdev);
+ if (adapter->fpe.mmsv.pmac_enabled)
+ ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv,
+ false);
+
/* link state has changed, schedule phy info update */
if (!test_bit(__IGC_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
@@ -6439,6 +6486,10 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
+ /* preemptible isn't supported yet */
+ if (qopt->mqprio.preemptible_tcs)
+ return -EOPNOTSUPP;
+
igc_ptp_read(adapter, &now);
if (igc_tsn_is_taprio_activated_by_user(adapter) &&
@@ -6679,13 +6730,14 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio)
{
struct igc_hw *hw = &adapter->hw;
- int i;
+ int err, i;
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
if (!mqprio->qopt.num_tc) {
adapter->strict_priority_enable = false;
+ netdev_reset_tc(adapter->netdev);
goto apply;
}
@@ -6716,6 +6768,21 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
igc_save_mqprio_params(adapter, mqprio->qopt.num_tc,
mqprio->qopt.offset);
+ err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
+ if (err)
+ return err;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ err = netdev_set_tc_queue(adapter->netdev, i, 1,
+ adapter->queue_per_tc[i]);
+ if (err)
+ return err;
+ }
+
+ /* In case the card is configured with fewer than four queues. */
+ for (; i < IGC_MAX_TX_QUEUES; i++)
+ adapter->queue_per_tc[i] = i;
+
mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
apply:
@@ -6779,7 +6846,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- ring = igc_xdp_get_tx_ring(adapter, cpu);
+ ring = igc_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
@@ -7125,6 +7192,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ /* enable HW vlan tag insertion/stripping by default */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -7157,8 +7227,8 @@ static int igc_probe(struct pci_dev *pdev,
}
/* configure RXPBSIZE and TXPBSIZE */
- wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_RXPBS, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
@@ -7190,6 +7260,8 @@ static int igc_probe(struct pci_dev *pdev,
igc_tsn_clear_schedule(adapter);
+ igc_fpe_init(adapter);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index efc7b30e4211..f4f5c28615d3 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -257,13 +257,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests failing to enable both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -300,10 +293,6 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags)
- return -EOPNOTSUPP;
-
if (on) {
pin = ptp_find_pin(igc->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
@@ -1162,6 +1151,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.n_ext_ts = IGC_N_EXTTS;
adapter->ptp_caps.n_per_out = IGC_N_PEROUT;
+ adapter->ptp_caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
adapter->ptp_caps.n_pins = IGC_N_SDP;
adapter->ptp_caps.verify = igc_ptp_verify_pin;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 12ddc5793651..f343c6bfc6be 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -222,6 +222,22 @@
#define IGC_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+/* Time sync registers - preemption statistics */
+#define IGC_PRMPTDRCNT 0x04284 /* Good RX Preempted Packets */
+#define IGC_PRMEVNTTCNT 0x04298 /* TX Preemption event counter */
+#define IGC_PRMEVNTRCNT 0x0429C /* RX Preemption event counter */
+
+/* Preemption Exception Counter */
+#define IGC_PRMEXCPRCNT 0x42A0
+/* Received out of order packets with SMD-C */
+#define IGC_PRMEXCPRCNT_OOO_SMDC 0x000000FF
+/* Received out of order packets with SMD-C and wrong Frame CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAME_CNT 0x0000FF00
+/* Received out of order packets with SMD-C and wrong Frag CNT */
+#define IGC_PRMEXCPRCNT_OOO_FRAG_CNT 0x00FF0000
+/* Received packets with SMD-S and wrong Frag CNT and Frame CNT */
+#define IGC_PRMEXCPRCNT_MISS_FRAME_FRAG_CNT 0xFF000000
+
/* Transmit Scheduling Registers */
#define IGC_TQAVCTRL 0x3570
#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 1e44374ca1ff..f22cc4d4f459 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,9 +2,143 @@
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
+#include "igc_base.h"
#include "igc_hw.h"
#include "igc_tsn.h"
+#define MIN_MULTPLIER_TX_MIN_FRAG 0
+#define MAX_MULTPLIER_TX_MIN_FRAG 3
+/* Frag size is based on Section 8.12.2 of the SW User Manual */
+#define TX_MIN_FRAG_SIZE 64
+#define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \
+ (MAX_MULTPLIER_TX_MIN_FRAG + 1))
+
+DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+static int igc_fpe_init_smd_frame(struct igc_ring *ring,
+ struct igc_tx_buffer *buffer,
+ struct sk_buff *skb)
+{
+ dma_addr_t dma = dma_map_single(ring->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->protocol = 0;
+ buffer->bytecount = skb->len;
+ buffer->gso_segs = 1;
+ buffer->time_stamp = jiffies;
+ dma_unmap_len_set(buffer, len, skb->len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ return 0;
+}
+
+static int igc_fpe_init_tx_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ enum igc_txd_popts_type type)
+{
+ u32 cmd_type, olinfo_status = 0;
+ struct igc_tx_buffer *buffer;
+ union igc_adv_tx_desc *desc;
+ int err;
+
+ if (!igc_desc_unused(ring))
+ return -EBUSY;
+
+ buffer = &ring->tx_buffer_info[ring->next_to_use];
+ err = igc_fpe_init_smd_frame(ring, buffer, skb);
+ if (err)
+ return err;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ buffer->bytecount;
+
+ olinfo_status |= FIELD_PREP(IGC_ADVTXD_PAYLEN_MASK, buffer->bytecount);
+
+ switch (type) {
+ case SMD_V:
+ case SMD_R:
+ olinfo_status |= FIELD_PREP(IGC_TXD_POPTS_SMD_MASK, type);
+ break;
+ }
+
+ desc = IGC_TX_DESC(ring, ring->next_to_use);
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+
+ netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+ buffer->next_to_watch = desc;
+ ring->next_to_use = (ring->next_to_use + 1) % ring->count;
+
+ return 0;
+}
+
+static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
+ enum igc_txd_popts_type type)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct igc_ring *ring;
+ struct sk_buff *skb;
+ int err;
+
+ ring = igc_get_tx_ring(adapter, cpu);
+ nq = txring_txq(ring);
+
+ skb = alloc_skb(SMD_FRAME_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_zero(skb, SMD_FRAME_SIZE);
+
+ __netif_tx_lock(nq, cpu);
+
+ err = igc_fpe_init_tx_descriptor(ring, skb, type);
+ igc_flush_tx_descriptors(ring);
+
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
+static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+ int err;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+
+ if (type == ETHTOOL_MPACKET_VERIFY) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_V);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-V\n");
+ } else if (type == ETHTOOL_MPACKET_RESPONSE) {
+ err = igc_fpe_xmit_smd_frame(adapter, SMD_R);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error sending SMD-R frame\n");
+ }
+}
+
+static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+ .send_mpacket = igc_fpe_send_mpacket,
+};
+
+void igc_fpe_init(struct igc_adapter *adapter)
+{
+ adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+ ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
+}
+
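The mmsv (MAC merge software verification) core drives the 802.3 verification handshake through the single send_mpacket() op registered here. A condensed sketch of the round trip as this driver sees it (illustrative only, using the ethtool_mmsv calls that appear in this patch):

    /* 1. Link up (igc_watchdog_task): the mmsv core arms verification and
     *    calls igc_fpe_send_mpacket(mmsv, ETHTOOL_MPACKET_VERIFY), which
     *    queues a zeroed 60-byte SMD-V frame.
     */
    ethtool_mmsv_link_state_handle(&adapter->fpe.mmsv, true);

    /* 2. The RX path recognizes the partner's SMD-R and reports it: */
    ethtool_mmsv_event_handle(&adapter->fpe.mmsv,
    			      ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET);

    /* 3. Verification complete; preemptible queues may transmit. */

From userspace the handshake is typically initiated through the ethtool MAC merge interface, e.g. `ethtool --set-mm <iface> pmac-enabled on verify-enabled on tx-enabled on` (exact option spelling depends on the ethtool version).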
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
@@ -37,17 +171,16 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
- if (adapter->taprio_offload_enable)
- new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
- if (is_any_launchtime(adapter))
+ if (adapter->taprio_offload_enable || is_any_launchtime(adapter) ||
+ adapter->strict_priority_enable)
new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
if (is_cbs_enabled(adapter))
new_flags |= IGC_FLAG_TSN_QAV_ENABLED;
- if (adapter->strict_priority_enable)
- new_flags |= IGC_FLAG_TSN_LEGACY_ENABLED;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ new_flags |= IGC_FLAG_TSN_PREEMPT_ENABLED;
return new_flags;
}
@@ -125,6 +258,29 @@ static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
wr32(IGC_TXARB, txarb);
}
+/**
+ * igc_tsn_set_rxpbsize - Set the receive packet buffer size
+ * @adapter: Pointer to the igc_adapter structure
+ * @rxpbs_exp_bmc_be: Combined size value for the Express, BMC, and Best
+ * Effort receive packet buffers
+ *
+ * The IGC_RXPBS register value may include allocations for the Express buffer,
+ * BMC buffer, Best Effort buffer, and the timestamp descriptor buffer
+ * (IGC_RXPBS_CFG_TS_EN).
+ */
+static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
+ u32 rxpbs_exp_bmc_be)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 rxpbs = rd32(IGC_RXPBS);
+
+ rxpbs &= ~(IGC_RXPBSIZE_EXP_MASK | IGC_BMC2OSPBSIZE_MASK |
+ IGC_RXPBSIZE_BE_MASK);
+ rxpbs |= rxpbs_exp_bmc_be;
+
+ wr32(IGC_RXPBS, rxpbs);
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@@ -136,15 +292,18 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
int i;
wr32(IGC_GTXOFFSET, 0);
- wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
+ wr32(IGC_TXPBS, IGC_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_DEFAULT);
+
if (igc_is_device_id_i226(hw))
igc_tsn_restore_retx_default(adapter);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
- IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
wr32(IGC_TQAVCTRL, tqavctrl);
@@ -157,16 +316,12 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_QBVCYCLET_S, 0);
wr32(IGC_QBVCYCLET, NSEC_PER_SEC);
- /* Reset mqprio TC configuration. */
- netdev_reset_tc(adapter->netdev);
-
/* Restore the default Tx arbitration: Priority 0 has the highest
* priority and is assigned to queue 0 and so on and so forth.
*/
igc_tsn_tx_arb(adapter, queue_per_tc);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
- adapter->flags &= ~IGC_FLAG_TSN_LEGACY_ENABLED;
return 0;
}
@@ -190,53 +345,51 @@ static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
wr32(IGC_RETX_CTL, retxctl);
}
+static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
+{
+ u8 mult = (fpe->tx_min_frag_size / TX_MIN_FRAG_SIZE) - 1;
+
+ return clamp_t(u8, mult, MIN_MULTPLIER_TX_MIN_FRAG,
+ MAX_MULTPLIER_TX_MIN_FRAG);
+}
+
+u32 igc_fpe_get_supported_frag_size(u32 frag_size)
+{
+ const u32 supported_sizes[] = {64, 128, 192, 256};
+
+ /* Find the smallest supported size that is >= frag_size */
+ for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
+ if (frag_size <= supported_sizes[i])
+ return supported_sizes[i];
+ }
+
+ /* Should not happen */
+ return TX_MAX_FRAG_SIZE;
+}
+
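The two helpers above compose into the MIN_FRAG field written to IGC_TQAVCTRL in igc_tsn_enable_offload() below. A worked example of the mapping (illustrative only):

    /* requested tx-min-frag-size -> supported size -> MIN_FRAG multiplier
     *   60  -> 64  -> (64 / 64) - 1  = 0
     *   100 -> 128 -> (128 / 64) - 1 = 1
     *   200 -> 256 -> (256 / 64) - 1 = 3   (200 > 192, so round up to 256)
     */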
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
+ u32 frag_size_mult;
int i;
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ igc_tsn_set_rxpbsize(adapter, IGC_RXPBSIZE_EXP_BMC_BE_TSN);
+
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
if (adapter->strict_priority_enable) {
- int err;
-
- err = netdev_set_num_tc(adapter->netdev, adapter->num_tc);
- if (err)
- return err;
-
- for (i = 0; i < adapter->num_tc; i++) {
- err = netdev_set_tc_queue(adapter->netdev, i, 1,
- adapter->queue_per_tc[i]);
- if (err)
- return err;
- }
-
- /* In case the card is configured with less than four queues. */
- for (; i < IGC_MAX_TX_QUEUES; i++)
- adapter->queue_per_tc[i] = i;
-
/* Configure queue priorities according to the user provided
* mapping.
*/
igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
-
- /* Enable legacy TSN mode which will do strict priority without
- * any other TSN features.
- */
- tqavctrl = rd32(IGC_TQAVCTRL);
- tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN;
- tqavctrl &= ~IGC_TQAVCTRL_ENHANCED_QAV;
- wr32(IGC_TQAVCTRL, tqavctrl);
-
- return 0;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -361,10 +514,16 @@ skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
- tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
-
+ tqavctrl = rd32(IGC_TQAVCTRL) & ~(IGC_TQAVCTRL_FUTSCDDIS |
+ IGC_TQAVCTRL_PREEMPT_ENA | IGC_TQAVCTRL_MIN_FRAG_MASK);
tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+ if (adapter->fpe.mmsv.pmac_enabled)
+ tqavctrl |= IGC_TQAVCTRL_PREEMPT_ENA;
+
+ frag_size_mult = igc_fpe_get_frag_size_mult(&adapter->fpe);
+ tqavctrl |= FIELD_PREP(IGC_TQAVCTRL_MIN_FRAG_MASK, frag_size_mult);
+
adapter->qbv_count++;
cycle = adapter->cycle_time;
@@ -425,6 +584,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
unsigned int new_flags;
int err = 0;
+ if (adapter->fpe.mmsv.pmac_enabled) {
+ err = igc_enable_empty_addr_recv(adapter);
+ if (err && net_ratelimit())
+ netdev_err(adapter->netdev, "Error adding empty address to MAC filter\n");
+ } else {
+ igc_disable_empty_addr_recv(adapter);
+ }
+
new_flags = igc_tsn_new_flags(adapter);
if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED))
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index 98ec845a86bf..c2a77229207b 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -4,9 +4,61 @@
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
+#define IGC_RX_MIN_FRAG_SIZE 60
+#define SMD_FRAME_SIZE 60
+
+enum igc_txd_popts_type {
+ SMD_V = 0x01,
+ SMD_R = 0x02,
+};
+
+DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled);
+
+void igc_fpe_init(struct igc_adapter *adapter);
+u32 igc_fpe_get_supported_frag_size(u32 frag_size);
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
+static inline bool igc_fpe_is_pmac_enabled(struct igc_adapter *adapter)
+{
+ return static_branch_unlikely(&igc_fpe_enabled) &&
+ adapter->fpe.mmsv.pmac_enabled;
+}
+
+static inline bool igc_fpe_handle_mpacket(struct igc_adapter *adapter,
+ union igc_adv_rx_desc *rx_desc,
+ unsigned int size, void *pktbuf)
+{
+ u32 status_error = le32_to_cpu(rx_desc->wb.upper.status_error);
+ int smd;
+
+ smd = FIELD_GET(IGC_RXDADV_STAT_SMD_TYPE_MASK, status_error);
+ if (smd != IGC_RXD_STAT_SMD_TYPE_V && smd != IGC_RXD_STAT_SMD_TYPE_R)
+ return false;
+
+ if (size == SMD_FRAME_SIZE && mem_is_zero(pktbuf, SMD_FRAME_SIZE)) {
+ struct ethtool_mmsv *mmsv = &adapter->fpe.mmsv;
+ enum ethtool_mmsv_event event;
+
+ if (smd == IGC_RXD_STAT_SMD_TYPE_V)
+ event = ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET;
+ else
+ event = ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET;
+
+ ethtool_mmsv_event_handle(mmsv, event);
+ }
+
+ return true;
+}
+
+static inline bool igc_fpe_transmitted_smd_v(union igc_adv_tx_desc *tx_desc)
+{
+ u32 olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
+ u8 smd = FIELD_GET(IGC_TXD_POPTS_SMD_MASK, olinfo_status);
+
+ return smd == SMD_V;
+}
+
#endif /* _IGC_TSN_H_ */
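The two inline helpers above are meant for the RX hot path: igc_fpe_is_pmac_enabled() keeps the cost down to a static branch while preemption is off, and igc_fpe_handle_mpacket() filters the zeroed SMD-V/SMD-R handshake frames out of normal delivery. A plausible consumption sketch (assumed; the matching igc_main.c hunk is not part of this excerpt, and "skip" is a hypothetical label):

    if (igc_fpe_is_pmac_enabled(adapter) &&
        igc_fpe_handle_mpacket(adapter, rx_desc, size, pktbuf))
    	goto skip;	/* handshake frame consumed, not for the stack */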
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index b456d102655a..2e7738f41c58 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -4,12 +4,14 @@
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o ixgbe_e610.o
+ ixgbe_xsk.o ixgbe_e610.o devlink/devlink.o ixgbe_fw_update.o \
+ devlink/region.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
new file mode 100644
index 000000000000..54f1b83dfe42
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_info_ctx {
+ char buf[128];
+ struct ixgbe_orom_info pending_orom;
+ struct ixgbe_nvm_info pending_nvm;
+ struct ixgbe_netlist_info pending_netlist;
+ struct ixgbe_hw_dev_caps dev_caps;
+};
+
+enum ixgbe_devlink_version_type {
+ IXGBE_DL_VERSION_RUNNING,
+ IXGBE_DL_VERSION_STORED
+};
+
+static void ixgbe_info_get_dsn(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ u8 dsn[8];
+
+ /* Copy the DSN into an array in Big Endian format */
+ put_unaligned_be64(pci_get_dsn(adapter->pdev), dsn);
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
+}
+
+static void ixgbe_info_orom_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ ctx->buf[0] = '\0';
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+ if (nvm_ver.oem_valid) {
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x",
+ nvm_ver.oem_major, nvm_ver.oem_minor,
+ nvm_ver.oem_release);
+
+ return;
+ }
+
+ ixgbe_get_orom_version(hw, &nvm_ver);
+ if (nvm_ver.or_valid)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%d.%d.%d",
+ nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
+}
+
+static void ixgbe_info_eetrack(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_nvm_version nvm_ver;
+
+ if (hw->mac.type == ixgbe_mac_e610) {
+ u32 eetrack = hw->flash.nvm.eetrack;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ eetrack = ctx->pending_nvm.eetrack;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", eetrack);
+ return;
+ }
+
+ ixgbe_get_oem_prod_version(hw, &nvm_ver);
+
+ /* No ETRACK version for OEM */
+ if (nvm_ver.oem_valid) {
+ ctx->buf[0] = '\0';
+ return;
+ }
+
+ ixgbe_get_etk_id(hw, &nvm_ver);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm_ver.etk_id);
+}
+
+static void ixgbe_info_fw_api(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->api_maj_ver, hw->api_min_ver, hw->api_patch);
+}
+
+static void ixgbe_info_fw_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
+}
+
+static void ixgbe_info_fw_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", nvm->srev);
+}
+
+static void ixgbe_info_orom_srev(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ orom = &ctx->pending_orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", orom->srev);
+}
+
+static void ixgbe_info_nvm_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ nvm = &ctx->pending_nvm;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
+}
+
+static void ixgbe_info_netlist_ver(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ /* The netlist version fields are BCD formatted */
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
+}
+
+static void ixgbe_info_netlist_build(struct ixgbe_adapter *adapter,
+ struct ixgbe_info_ctx *ctx,
+ enum ixgbe_devlink_version_type type)
+{
+ struct ixgbe_netlist_info *netlist = &adapter->hw.flash.netlist;
+
+ if (type == IXGBE_DL_VERSION_STORED &&
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ netlist = &ctx->pending_netlist;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+}
+
+static int ixgbe_set_ctx_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_info_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ bool *pending_orom, *pending_nvm, *pending_netlist;
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to discover device capabilities");
+ return err;
+ }
+
+ pending_orom = &ctx->dev_caps.common_cap.nvm_update_pending_orom;
+ pending_nvm = &ctx->dev_caps.common_cap.nvm_update_pending_nvm;
+ pending_netlist = &ctx->dev_caps.common_cap.nvm_update_pending_netlist;
+
+ if (*pending_orom) {
+ err = ixgbe_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err)
+ *pending_orom = false;
+ }
+
+ if (*pending_nvm) {
+ err = ixgbe_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err)
+ *pending_nvm = false;
+ }
+
+ if (*pending_netlist) {
+ err = ixgbe_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err)
+ *pending_netlist = false;
+ }
+
+ return 0;
+}
+
+static int ixgbe_devlink_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_fw_api(adapter, ctx);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_build(adapter, ctx);
+ err = devlink_info_version_running_put(req, "fw.mgmt.build", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ return devlink_info_version_running_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int
+ixgbe_devlink_pending_info_get_e610(struct ixgbe_adapter *adapter,
+ struct devlink_info_req *req,
+ struct ixgbe_info_ctx *ctx)
+{
+ int err;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_fw_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.mgmt.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_orom_srev(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.undi.srev", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_nvm_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.psid.api", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_ver(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ err = devlink_info_version_stored_put(req, "fw.netlist", ctx->buf);
+ if (err)
+ return err;
+
+ ixgbe_info_netlist_build(adapter, ctx, IXGBE_DL_VERSION_STORED);
+ return devlink_info_version_stored_put(req, "fw.netlist.build",
+ ctx->buf);
+}
+
+static int ixgbe_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_info_ctx *ctx;
+ int err;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
+
+ ixgbe_info_get_dsn(adapter, ctx);
+ err = devlink_info_serial_number_put(req, ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ err = hw->eeprom.ops.read_pba_string(hw, ctx->buf, sizeof(ctx->buf));
+ if (err)
+ goto free_ctx;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_orom_ver(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI,
+ ctx->buf);
+ if (err)
+ goto free_ctx;
+
+ ixgbe_info_eetrack(adapter, ctx, IXGBE_DL_VERSION_RUNNING);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ ctx->buf);
+ if (err || hw->mac.type != ixgbe_mac_e610)
+ goto free_ctx;
+
+ err = ixgbe_set_ctx_dev_caps(hw, ctx, extack);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_info_get_e610(adapter, req, ctx);
+ if (err)
+ goto free_ctx;
+
+ err = ixgbe_devlink_pending_info_get_e610(adapter, req, ctx);
+free_ctx:
+ kfree(ctx);
+ return err;
+}
+
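With this callback registered (see ixgbe_devlink_ops below), the versions assembled above surface through `devlink dev info pci/<bus:dev.fn>`: board.id as fixed, fw.undi, fw.bundle_id, fw.mgmt.api and the other e610-specific names as running; when an update is staged but not yet activated, the same names are repeated in the stored section.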
+/**
+ * ixgbe_devlink_reload_empr_start - Start EMP reset to activate new firmware
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ *
+ * Allow user to activate new Embedded Management Processor firmware by
+ * issuing device specific EMP reset. Called in response to
+ * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
+ *
+ * Note that teardown and rebuild of the driver state happens automatically as
+ * part of an interrupt and watchdog task. This is because all physical
+ * functions on the device must be able to reset when an EMP reset occurs from
+ * any source.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_devlink_reload_empr_start(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* Pending is a bitmask of which flash banks have a pending update,
+ * including the main NVM bank, the Option ROM bank, and the netlist
+ * bank. If any of these bits are set, then there is a pending update
+ * waiting to be activated.
+ */
+ if (!pending) {
+ NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
+ return -ECANCELED;
+ }
+
+ if (adapter->fw_emp_reset_disabled) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
+ return -ECANCELED;
+ }
+
+ err = ixgbe_aci_nvm_update_empr(hw);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to trigger EMP device reset to reload firmware");
+
+ return err;
+}
+
+/* Wait up to 10 sec in 0.5 sec ticks; an EMPR takes no less than half a second. */
+#define IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC 20
+
+/**
+ * ixgbe_devlink_reload_empr_finish - finishes EMP reset
+ * @devlink: pointer to the devlink instance
+ * @action: the action to perform.
+ * @limit: limits on what reload should do
+ * @actions_performed: actions performed
+ * @extack: netlink extended ACK structure
+ *
+ * Wait for new NVM to be loaded during EMP reset.
+ *
+ * Return: -ETIME when the timeout is exceeded, 0 on success.
+ */
+static int ixgbe_devlink_reload_empr_finish(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i = 0;
+ u32 fwsm;
+
+ do {
+ /* Immediately after triggering the EMP reset the FWSM register
+ * may not be cleared yet, so begin the loop with a delay to
+ * avoid reading a stale value.
+ */
+ mdelay(500);
+
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+
+ if (i++ >= IXGBE_DEVLINK_RELOAD_TIMEOUT_SEC)
+ return -ETIME;
+
+ } while (!(fwsm & IXGBE_FWSM_FW_VAL_BIT));
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+
+ adapter->flags2 &= ~(IXGBE_FLAG2_API_MISMATCH |
+ IXGBE_FLAG2_FW_ROLLBACK);
+
+ return 0;
+}
+
+static const struct devlink_ops ixgbe_devlink_ops = {
+ .info_get = ixgbe_devlink_info_get,
+ .supported_flash_update_params =
+ DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .flash_update = ixgbe_flash_pldm_image,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_down = ixgbe_devlink_reload_empr_start,
+ .reload_up = ixgbe_devlink_reload_empr_finish,
+};
+
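Taken together, these ops back the standard firmware activation flow: stage an image with `devlink dev flash pci/<addr> file <image>.bin` (handled by ixgbe_flash_pldm_image above), then activate it without a reboot via `devlink dev reload pci/<addr> action fw_activate`. reload_down issues the EMP reset only when an update is actually pending, and reload_up polls FWSM for up to roughly 10 seconds until the firmware reports valid again.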
+/**
+ * ixgbe_allocate_devlink - Allocate devlink instance
+ * @dev: device to allocate devlink for
+ *
+ * Allocate a devlink instance for this physical function.
+ *
+ * Return: pointer to the device adapter structure on success,
+ * ERR_PTR(-ENOMEM) when allocation failed.
+ */
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev)
+{
+ struct ixgbe_adapter *adapter;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ixgbe_devlink_ops, sizeof(*adapter), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ adapter = devlink_priv(devlink);
+ adapter->devlink = devlink;
+
+ return adapter;
+}
+
+/**
+ * ixgbe_devlink_set_switch_id - Set unique switch ID based on PCI DSN
+ * @adapter: pointer to the device adapter structure
+ * @ppid: struct with switch id information
+ */
+static void ixgbe_devlink_set_switch_id(struct ixgbe_adapter *adapter,
+ struct netdev_phys_item_id *ppid)
+{
+ u64 id = pci_get_dsn(adapter->pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+/**
+ * ixgbe_devlink_register_port - Register devlink port
+ * @adapter: pointer to the device adapter structure
+ *
+ * Create and register a devlink_port for this physical function.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)
+{
+ struct devlink_port *devlink_port = &adapter->devlink_port;
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = adapter->hw.bus.func;
+ ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register(devlink, devlink_port, 0);
+ if (err) {
+ dev_err(dev,
+ "devlink port registration failed, err %d\n", err);
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
new file mode 100644
index 000000000000..381558058048
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _IXGBE_DEVLINK_H_
+#define _IXGBE_DEVLINK_H_
+
+struct ixgbe_adapter *ixgbe_allocate_devlink(struct device *dev);
+int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter);
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/region.c b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
new file mode 100644
index 000000000000..76f6571c3c34
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/region.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ixgbe.h"
+#include "devlink.h"
+
+#define IXGBE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops;
+static const struct devlink_region_ops ixgbe_sram_region_ops;
+
+static int ixgbe_devlink_parse_region(struct ixgbe_hw *hw,
+ const struct devlink_region_ops *ops,
+ bool *read_shadow_ram, u32 *nvm_size)
+{
+ if (ops == &ixgbe_nvm_region_ops) {
+ *read_shadow_ram = false;
+ *nvm_size = hw->flash.flash_size;
+ } else if (ops == &ixgbe_sram_region_ops) {
+ *read_shadow_ram = true;
+ *nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_snapshot - Capture a snapshot of the NVM content
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW cmd.
+ *
+ * Capture a snapshot of the whole requested NVM region.
+ *
+ * No need to worry about freeing @data, the devlink core takes care of it.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ENOMEM when memory allocation fails, and
+ * -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u8 *nvm_data, *buf;
+ u32 nvm_size, left;
+ u8 num_blks;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ nvm_data = kvzalloc(nvm_size, GFP_KERNEL);
+ if (!nvm_data)
+ return -ENOMEM;
+
+ num_blks = DIV_ROUND_UP(nvm_size, IXGBE_DEVLINK_READ_BLK_SIZE);
+ buf = nvm_data;
+ left = nvm_size;
+
+ for (int i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, IXGBE_DEVLINK_READ_BLK_SIZE, left);
+
+ /* The NVM lock must be acquired on each loop iteration because
+ * reading the whole NVM takes longer than the maximum period the
+ * lock may be held, as defined by IXGBE_NVM_TIMEOUT.
+ */
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire NVM semaphore");
+ kvfree(nvm_data);
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, i * IXGBE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, buf, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read RAM content");
+ ixgbe_release_nvm(hw);
+ kvfree(nvm_data);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+
+ buf += read_sz;
+ left -= read_sz;
+ }
+
+ *data = nvm_data;
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_devcaps_snapshot - Capture a snapshot of device capabilities
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_NEW for
+ * the device-caps devlink region.
+ *
+ * Capture a snapshot of the device capabilities reported by firmware.
+ *
+ * No need to worry about freeing @data, the devlink core takes care of it.
+ *
+ * Return: 0 on success, -ENOMEM when memory allocation fails, or the
+ * return code of the read operation.
+ */
+static int ixgbe_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_aci_cmd_list_caps_elem *caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ caps = kvzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_list_caps(hw, caps, IXGBE_ACI_MAX_BUFFER_SIZE, NULL,
+ ixgbe_aci_opc_list_dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read device capabilities");
+ kvfree(caps);
+ return err;
+ }
+
+ *data = (u8 *)caps;
+ return 0;
+}
+
+/**
+ * ixgbe_devlink_nvm_read - Read a portion of NVM flash content
+ * @devlink: the devlink instance
+ * @ops: the devlink region to snapshot
+ * @extack: extended ACK response structure
+ * @offset: the offset to start at
+ * @size: the amount to read
+ * @data: the data buffer to read into
+ *
+ * This function is called in response to DEVLINK_CMD_REGION_READ to directly
+ * read a section of the NVM contents.
+ *
+ * Read from either the nvm-flash region or the shadow-ram region.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported regions, -EBUSY when
+ * the NVM cannot be locked, -ERANGE when the read exceeds the region size,
+ * and -EIO when an error occurs during reading.
+ */
+static int ixgbe_devlink_nvm_read(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool read_shadow_ram;
+ u32 nvm_size;
+ int err;
+
+ err = ixgbe_devlink_parse_region(hw, ops, &read_shadow_ram, &nvm_size);
+ if (err)
+ return err;
+
+ if (offset + size > nvm_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
+ return -ERANGE;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EBUSY;
+ }
+
+ err = ixgbe_read_flat_nvm(hw, (u32)offset, &size, data, read_shadow_ram);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ixgbe_release_nvm(hw);
+ return -EIO;
+ }
+
+ ixgbe_release_nvm(hw);
+ return 0;
+}
+
+static const struct devlink_region_ops ixgbe_nvm_region_ops = {
+ .name = "nvm-flash",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_sram_region_ops = {
+ .name = "shadow-ram",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_nvm_snapshot,
+ .read = ixgbe_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ixgbe_devcaps_region_ops = {
+ .name = "device-caps",
+ .destructor = kvfree,
+ .snapshot = ixgbe_devlink_devcaps_snapshot,
+};
+
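For reference, these regions are exercised with the usual devlink region commands (illustrative device address; exact syntax depends on the iproute2 version):

    devlink region show
    devlink region new pci/0000:03:00.0/nvm-flash snapshot 1
    devlink region dump pci/0000:03:00.0/nvm-flash snapshot 1
    devlink region read pci/0000:03:00.0/nvm-flash address 0 length 256

`region new` captures a snapshot through ixgbe_devlink_nvm_snapshot(); `region read` goes through the direct ixgbe_devlink_nvm_read() path.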
+/**
+ * ixgbe_devlink_init_regions - Initialize devlink regions
+ * @adapter: adapter instance
+ *
+ * Create devlink regions used to enable access to dump the contents of the
+ * flash memory of the device.
+ */
+void ixgbe_devlink_init_regions(struct ixgbe_adapter *adapter)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct device *dev = &adapter->pdev->dev;
+ u64 nvm_size, sram_size;
+
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ nvm_size = adapter->hw.flash.flash_size;
+ adapter->nvm_region = devl_region_create(devlink, &ixgbe_nvm_region_ops,
+ 1, nvm_size);
+ if (IS_ERR(adapter->nvm_region)) {
+ dev_err(dev,
+ "Failed to create NVM devlink region, err %ld\n",
+ PTR_ERR(adapter->nvm_region));
+ adapter->nvm_region = NULL;
+ }
+
+ sram_size = adapter->hw.flash.sr_words * 2u;
+ adapter->sram_region = devl_region_create(devlink, &ixgbe_sram_region_ops,
+ 1, sram_size);
+ if (IS_ERR(adapter->sram_region)) {
+ dev_err(dev,
+ "Failed to create shadow-ram devlink region, err %ld\n",
+ PTR_ERR(adapter->sram_region));
+ adapter->sram_region = NULL;
+ }
+
+ adapter->devcaps_region = devl_region_create(devlink,
+ &ixgbe_devcaps_region_ops,
+ 10, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (IS_ERR(adapter->devcaps_region)) {
+ dev_err(dev,
+ "Failed to create device-caps devlink region, err %ld\n",
+ PTR_ERR(adapter->devcaps_region));
+ adapter->devcaps_region = NULL;
+ }
+}
+
+/**
+ * ixgbe_devlink_destroy_regions - Destroy devlink regions
+ * @adapter: adapter instance
+ *
+ * Remove previously created regions for this adapter instance.
+ */
+void ixgbe_devlink_destroy_regions(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type != ixgbe_mac_e610)
+ return;
+
+ if (adapter->nvm_region)
+ devl_region_destroy(adapter->nvm_region);
+
+ if (adapter->sram_region)
+ devl_region_destroy(adapter->sram_region);
+
+ if (adapter->devcaps_region)
+ devl_region_destroy(adapter->devcaps_region);
+}
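devl_region_create()/devl_region_destroy() expect the devlink instance lock to be held, so the probe/remove wiring (outside this excerpt) presumably brackets these calls; a minimal sketch under that assumption:

    devl_lock(adapter->devlink);
    ixgbe_devlink_init_regions(adapter);
    devl_unlock(adapter->devlink);

    /* ... driver lifetime ... */

    devl_lock(adapter->devlink);
    ixgbe_devlink_destroy_regions(adapter);
    devl_unlock(adapter->devlink);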
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e6a380d4929b..47311b134a7a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -17,6 +17,8 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/devlink.h>
+
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
@@ -612,6 +614,11 @@ struct ixgbe_adapter {
struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
struct mii_bus *mii_bus;
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+ struct devlink_region *nvm_region;
+ struct devlink_region *sram_region;
+ struct devlink_region *devcaps_region;
unsigned long state;
@@ -667,6 +674,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
#define IXGBE_FLAG2_NO_MEDIA BIT(21)
#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
+#define IXGBE_FLAG2_API_MISMATCH BIT(23)
+#define IXGBE_FLAG2_FW_ROLLBACK BIT(24)
/* Tx fast path data */
int num_tx_queues;
@@ -755,6 +764,8 @@ struct ixgbe_adapter {
u32 atr_sample_rate;
spinlock_t fdir_perfect_lock;
+ bool fw_emp_reset_disabled;
+
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
@@ -830,6 +841,17 @@ struct ixgbe_adapter {
spinlock_t vfs_lock;
};
+struct ixgbe_netdevice_priv {
+ struct ixgbe_adapter *adapter;
+};
+
+static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
+{
+ struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);
+
+ return priv->adapter;
+}
+
static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
if (static_key_enabled(&ixgbe_xdp_locking_key))
@@ -945,6 +967,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter);
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4aaaea3b5f8f..444da982593f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1169,6 +1169,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 964988b4d58b..d5b1b974b4a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -2230,6 +2230,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 7beaf6ea57f9..5784d5d1896e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -332,6 +332,7 @@ int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* Devices in the second generation:
* 82599
* X540
+ * E610
**/
int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 19d6b6fa8fb3..3dd5a16a14df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -118,14 +118,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
}
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
@@ -142,7 +142,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i, j;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
@@ -167,7 +167,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
@@ -184,7 +184,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
}
@@ -193,7 +193,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 prio, u8 bwg_id, u8 bw_pct,
u8 up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
}
@@ -219,7 +219,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
@@ -230,7 +230,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
}
@@ -239,7 +239,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
u8 *prio, u8 *bwg_id, u8 *bw_pct,
u8 *up_map)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
*bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
@@ -250,7 +250,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
u8 *bw_pct)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
}
@@ -258,7 +258,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
u8 setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
@@ -269,14 +269,14 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
u8 *setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
struct ixgbe_hw *hw = &adapter->hw;
int ret = DCB_NO_HW_CHG;
@@ -383,7 +383,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (capid) {
case DCB_CAP_ATTR_PG:
@@ -420,7 +420,7 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -447,14 +447,14 @@ static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->dcb_cfg.pfc_mode_enable;
}
static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->temp_dcb_cfg.pfc_mode_enable = state;
}
@@ -471,7 +471,7 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
*/
static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
@@ -486,7 +486,7 @@ static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
@@ -506,7 +506,7 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
struct ieee_ets *ets)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err;
__u8 max_tc = 0;
@@ -559,7 +559,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
int i;
@@ -584,7 +584,7 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 *prio_tc;
int err;
@@ -616,7 +616,7 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -661,7 +661,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
struct dcb_app *app)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int err;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -705,13 +705,13 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
return adapter->dcbx_cap;
}
static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
int err = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index 00935747c8c5..71ea25de1bac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -344,6 +344,40 @@ void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
}
/**
+ * ixgbe_aci_get_fw_ver - Get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_ver *resp;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = le32_to_cpu(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return err;
+}
+
+/**
* ixgbe_aci_req_res - request a common resource
* @hw: pointer to the HW struct
* @res: resource ID
@@ -554,6 +588,20 @@ static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
break;
case IXGBE_ACI_CAPS_NVM_VER:
break;
+ case IXGBE_ACI_CAPS_PENDING_NVM_VER:
+ caps->nvm_update_pending_nvm = true;
+ break;
+ case IXGBE_ACI_CAPS_PENDING_OROM_VER:
+ caps->nvm_update_pending_orom = true;
+ break;
+ case IXGBE_ACI_CAPS_PENDING_NET_VER:
+ caps->nvm_update_pending_netlist = true;
+ break;
+ case IXGBE_ACI_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ break;
case IXGBE_ACI_CAPS_MAX_MTU:
caps->max_mtu = number;
break;
@@ -1411,6 +1459,61 @@ int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
}
/**
+ * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Get firmware version and start the hardware using the generic
+ * start_hw() and ixgbe_start_hw_gen2() functions.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_aci_get_fw_ver(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_start_hw_generic(hw);
+ if (err)
+ return err;
+
+ ixgbe_start_hw_gen2(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
* ixgbe_get_media_type_e610 - Gets media type
* @hw: pointer to the HW struct
*
@@ -1743,6 +1846,38 @@ void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_fw_recovery_mode_e610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM recovery mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+static bool ixgbe_fw_recovery_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_e610 - Check FW NVM rollback mode
+ * @hw: pointer to hardware structure
+ *
+ * Check FW NVM rollback mode by reading the value of
+ * the dedicated register.
+ *
+ * Return: true if FW is in rollback mode, otherwise false.
+ */
+static bool ixgbe_fw_rollback_mode_e610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
+
+ return !!(fwsm & IXGBE_GL_MNG_FWSM_ROLLBACK_M);
+}
+
+/**
* ixgbe_init_phy_ops_e610 - PHY specific init
* @hw: pointer to hardware structure
*
@@ -2226,6 +2361,131 @@ int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
}
/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ __le16 len;
+ int err;
+
+ /* Read the module's length word from the Shadow RAM: module_typeid 0
+ * selects the SR itself, the word offset of the module size is
+ * converted to a byte offset, and both last_command and
+ * read_shadow_ram are set to true.
+ */
+ err = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (err)
+ return err;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
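The byte/word mixing in the length read above is easy to misread: module_typeid is a word offset into the Shadow RAM, while the offset argument of ixgbe_aci_read_nvm() is in bytes, so the module's size word (the word after its pointer word) sits at byte offset 2 * module_typeid + 2. A standalone sketch with invented numbers, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int module_typeid = 0x28;	/* hypothetical pointer word offset */
	unsigned int byte_offset = 2 * module_typeid + 2;

	/* word offset 0x29 == byte offset 0x52 */
	printf("module size word at byte offset 0x%x\n", byte_offset);
	return 0;
}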
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.nvm;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_U_MASK, offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split across two separate
+ * one-byte flag fields in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 err;
+
+ cmd = &desc.params.nvm;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_A_MASK,
+ cmd_flags);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!err && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return err;
+}
+
+/**
* ixgbe_nvm_validate_checksum - validate checksum
* @hw: pointer to the HW struct
*
@@ -2267,6 +2527,955 @@ int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_discover_flash_size - Discover the available flash size
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (err == -EIO &&
+ hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ err = 0;
+ max_size = offset;
+ } else if (!err) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
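A minimal userspace analogue of the bisection above; probe() stands in for the ixgbe_read_flat_nvm() call and the -EIO/IXGBE_ACI_RC_EINVAL check, and the 4MB flash size is invented for the demo:

#include <stdbool.h>
#include <stdio.h>

#define MAX_OFFSET 0xFFFFFF	/* stands in for IXGBE_ACI_NVM_MAX_OFFSET */
#define REAL_SIZE  0x400000	/* pretend 4MB part, unknown to the search */

static bool probe(unsigned int offset)
{
	return offset < REAL_SIZE;	/* read succeeds below the end of flash */
}

int main(void)
{
	unsigned int min_size = 0, max_size = MAX_OFFSET + 1;

	/* Invariant: offsets below min_size are readable, offsets at or
	 * beyond max_size are not; stop when the bounds are adjacent.
	 */
	while (max_size - min_size > 1) {
		unsigned int offset = (max_size + min_size) / 2;

		if (probe(offset))
			min_size = offset;
		else
			max_size = offset;
	}

	printf("discovered flash size: 0x%x\n", max_size);	/* 0x400000 */
	return 0;
}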
+
+/**
+ * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word uses its highest bit to specify whether its value is
+ * in word units or 4KB sector units. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset,
+ u32 *pointer)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K;
+ else
+ *pointer = value * sizeof(u16);
+
+ return 0;
+}
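A worked decode of the pointer-word encoding described above, assuming the "4KB units" flag is bit 15 (the word's highest bit, per the comment); the sample values are invented:

#include <stdint.h>
#include <stdio.h>

#define PTR_4KB_UNITS 0x8000	/* assumed value of IXGBE_SR_NVM_PTR_4KB_UNITS */

static uint32_t decode_ptr(uint16_t value)
{
	if (value & PTR_4KB_UNITS)
		return (uint32_t)(value & ~PTR_4KB_UNITS) * 4096;
	return (uint32_t)value * 2;	/* plain word units */
}

int main(void)
{
	printf("0x%x\n", decode_ptr(0x8005));	/* 5 * 4KB -> 0x5000 */
	printf("0x%x\n", decode_ptr(0x0005));	/* 5 words -> 0xa */
	return 0;
}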
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ u16 value;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, offset, &value);
+ if (err)
+ return err;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * SZ_4K;
+
+ return 0;
+}
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD,
+ &ctrl_word);
+ if (err)
+ return err;
+
+ if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) !=
+ IXGBE_SR_CTRL_WORD_VALID)
+ return -ENODATA;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, look up the module offset from the beginning of
+ * the flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case IXGBE_E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
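A compact model of the active/inactive selection above: the requested bank is the second physical bank exactly when "second bank is active" matches "caller wants the active bank". The layout numbers below are invented:

#include <stdio.h>

static unsigned int bank_offset(unsigned int base, unsigned int size,
				int second_active, int want_active)
{
	/* The two banks are contiguous; the second starts at base + size. */
	return base + (second_active == want_active ? size : 0);
}

int main(void)
{
	/* hypothetical module: first bank at 0x10000, banks of 0x8000 bytes */
	printf("active:   0x%x\n", bank_offset(0x10000, 0x8000, 1, 1));
	printf("inactive: 0x%x\n", bank_offset(0x10000, 0x8000, 1, 0));
	return 0;	/* prints 0x18000, then 0x10000 */
}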
+
+/**
+ * ixgbe_read_flash_module - Read data from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access and relies on the
+ * hw->flash.banks data set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ u32 start;
+ int err;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start)
+ return -EINVAL;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local,
+ sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_read_orom_module - Read from the active Option ROM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive OROM module
+ * @offset: offset into the OROM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active Option ROM module of the flash.
+ * Note that unlike the NVM module, the CSS data is stored at the end of the
+ * module instead of at the beginning.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_orom_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ int err;
+
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ offset * sizeof(data_local),
+ (u8 *)&data_local, sizeof(data_local));
+ if (!err)
+ *data = le16_to_cpu(data_local);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the
+ * Authentication header size, and then convert to words.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (err)
+ return err;
+
+ /* The CSS header length is in DWORDs, so convert it to words and add
+ * the authentication header size.
+ */
+ hdr_len_dword = (hdr_len_h << 16) | hdr_len_l;
+ *hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
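The unit conversion above in numbers: the CSS field counts DWORDs, while the caller wants words. A sketch with a made-up field value of 0x150 DWORDs and a placeholder authentication-header length (the real IXGBE_NVM_AUTH_HEADER_LEN is defined elsewhere in the driver):

#include <stdio.h>

int main(void)
{
	unsigned int hdr_len_l = 0x0150, hdr_len_h = 0;	/* invented field */
	unsigned int auth_len_words = 8;		/* placeholder only */
	unsigned int hdr_len_dwords = (hdr_len_h << 16) | hdr_len_l;
	unsigned int hdr_len_words = hdr_len_dwords * 2 + auth_len_words;

	printf("%u words\n", hdr_len_words);	/* 336 * 2 + 8 = 680 */
	return 0;
}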
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active NVM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (err)
+ return err;
+
+ *srev = (srev_h << 16) | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @civd: storage for the Option ROM CIVD data.
+ *
+ * Searches through the Option ROM flash contents to locate the CIVD data for
+ * the image.
+ *
+ * Return: the exit code of the operation.
+ */
+static int
+ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
+ struct ixgbe_orom_civd_info *civd)
+{
+ struct ixgbe_orom_civd_info tmp;
+ u32 offset;
+ int err;
+
+ /* The CIVD section is located in the Option ROM aligned to 512 bytes.
+ * The first 4 bytes must contain the ASCII characters "$CIV".
+ * A simple modulo 256 sum of all of the bytes of the structure must
+ * equal 0.
+ */
+ for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
+ offset += SZ_512) {
+ u8 sum = 0;
+ u32 i;
+
+ err = ixgbe_read_flash_module(hw, bank,
+ IXGBE_E610_SR_1ST_OROM_BANK_PTR,
+ offset,
+ (u8 *)&tmp, sizeof(tmp));
+ if (err)
+ return err;
+
+ /* Skip forward until we find a matching signature */
+ if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
+ sizeof(tmp.signature)))
+ continue;
+
+ /* Verify that the simple checksum is zero */
+ for (i = 0; i < sizeof(tmp); i++)
+ sum += ((u8 *)&tmp)[i];
+
+ if (sum)
+ return -EDOM;
+
+ *civd = tmp;
+ return 0;
+ }
+
+ return -ENODATA;
+}
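The validity rule above (a "$CIV" signature plus a modulo-256 byte sum of zero) in standalone form; the struct layout here is a guess for illustration only, not the driver's ixgbe_orom_civd_info definition:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct civd_demo {			/* illustrative layout only */
	uint8_t signature[4];		/* "$CIV" */
	uint8_t checksum;		/* makes the byte sum wrap to 0 */
	uint8_t combo_ver[4];
};

static int civd_valid(const struct civd_demo *c)
{
	const uint8_t *p = (const uint8_t *)c;
	uint8_t sum = 0;

	if (memcmp(c->signature, "$CIV", 4))
		return 0;
	for (size_t i = 0; i < sizeof(*c); i++)
		sum += p[i];		/* simple modulo-256 sum */
	return sum == 0;
}

int main(void)
{
	struct civd_demo c = { .signature = "$CIV",
			       .combo_ver = { 1, 2, 3, 4 } };
	const uint8_t *p = (const uint8_t *)&c;
	uint8_t sum = 0;

	for (size_t i = 0; i < sizeof(c); i++)
		sum += p[i];
	c.checksum = (uint8_t)-sum;	/* force the total to 0 mod 256 */

	printf("valid: %d\n", civd_valid(&c));	/* prints 1 */
	return 0;
}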
+
+/**
+ * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from active or inactive flash module
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active OROM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *srev)
+{
+ u32 orom_size_word = hw->flash.banks.orom_size / 2;
+ u32 css_start, hdr_len;
+ u16 srev_l, srev_h;
+ int err;
+
+ err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (err)
+ return err;
+
+ if (orom_size_word < hdr_len)
+ return -EINVAL;
+
+ /* Calculate how far into the Option ROM the CSS header starts. Note
+ * that ixgbe_read_orom_module takes a word offset.
+ */
+ css_start = orom_size_word - hdr_len;
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_L,
+ &srev_l);
+ if (err)
+ return err;
+
+ err = ixgbe_read_orom_module(hw, bank,
+ css_start + IXGBE_NVM_CSS_SREV_H,
+ &srev_h);
+ if (err)
+ return err;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @orom: pointer to Option ROM info structure
+ *
+ * Read Option ROM version and security revision from the Option ROM flash
+ * section.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_orom_info *orom)
+{
+ struct ixgbe_orom_civd_info civd;
+ u32 combo_ver;
+ int err;
+
+ err = ixgbe_get_orom_civd_data(hw, bank, &civd);
+ if (err)
+ return err;
+
+ combo_ver = le32_to_cpu(civd.combo_ver);
+
+ orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
+ orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
+ orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver);
+
+ return ixgbe_get_orom_srev(hw, bank, &orom->srev);
+}
+
+/**
+ * ixgbe_get_inactive_orom_ver - Read Option ROM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @orom: storage for Option ROM version information
+ *
+ * Read the Option ROM version and security revision data for the inactive
+ * section of flash. Used to access version data for a pending update that has
+ * not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom)
+{
+ return ixgbe_get_orom_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, orom);
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ int err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank,
+ IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (err)
+ return err;
+
+ nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver);
+ nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver);
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (err)
+ return err;
+
+ err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (err)
+ return err;
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return 0;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Read the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ int err;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (err)
+ return err;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (err)
+ return err;
+
+ /* Sanity check that we have at least enough words to store the
+ * netlist ID block.
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE)
+ return -EIO;
+
+ err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (err)
+ return err;
+
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
+ if (!id_blk)
+ return -ENOMEM;
+
+ /* Read out the entire Netlist ID Block at once. */
+ err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
+ sizeof(*id_blk), (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE *
+ sizeof(*id_blk));
+ if (err)
+ goto free_id_blk;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the leftmost 4 bytes of the SHA hash. */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+free_id_blk:
+ kfree(id_blk);
+ return err;
+}
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_get_flash_data - get flash data
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate flash data such as the Shadow RAM size, flash size,
+ * active flash banks and blank_nvm_mode.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_flash_data(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ u8 sr_size;
+ int err;
+
+ /* The SR size is stored regardless of the NVM programming mode,
+ * as the blank mode may be used on the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2) */
+ flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16));
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if (fla & IXGBE_GLNVM_FLA_LOCKED_M) {
+ flash->blank_nvm_mode = false;
+ } else {
+ flash->blank_nvm_mode = true;
+ return -EIO;
+ }
+
+ err = ixgbe_discover_flash_size(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_determine_active_flash_banks(hw);
+ if (err)
+ return err;
+
+ err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (err)
+ return err;
+
+ err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->orom);
+ if (err)
+ return err;
+
+ err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+ return err;
+}
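The sr_size decode above, with numbers: the register field stores a power of two in KB, and the driver wants 16-bit words. A sketch with a hypothetical field value of 4:

#include <stdio.h>

int main(void)
{
	unsigned int sr_size = 4;	/* hypothetical GLNVM_GENS field */

	/* 2^4 KB = 16KB of Shadow RAM = 16 * 1024 / 2 = 8192 words */
	unsigned int sr_words = (1u << sr_size) * (1024 / 2);

	printf("%u words\n", sr_words);	/* 8192 */
	return 0;
}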
+
+/**
+ * ixgbe_aci_nvm_update_empr - update NVM using EMPR
+ * @hw: pointer to the HW struct
+ *
+ * Force EMP reset using ACI command (0x0709). This command allows SW to
+ * request an EMPR to activate new FW.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_update_empr);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_nvm_set_pkg_data - NVM set package data
+ * @hw: pointer to the HW struct
+ * @del_pkg_data_flag: if set, the pkg_data currently stored by the FW is
+ * deleted; the buffer must then have size 0
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ *
+ * Set package data using ACI command (0x070A).
+ * This command is equivalent to the reception of a PLDM FW Update
+ * GetPackageData cmd and should be sent as the first command of the NVM
+ * update flow.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length)
+{
+ struct ixgbe_aci_cmd_nvm_pkg_data *cmd;
+ struct ixgbe_aci_desc desc;
+
+ if (length != 0 && !data)
+ return -EINVAL;
+
+ cmd = &desc.params.pkg_data;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_pkg_data);
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ if (del_pkg_data_flag)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_PKG_DELETE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_pass_component_tbl - NVM pass component table
+ * @hw: pointer to the HW struct
+ * @data: pointer to buffer
+ * @length: length of the buffer
+ * @transfer_flag: parameter for determining stage of the update
+ * @comp_response: pointer to the response from the 0x070B ACI command
+ * @comp_response_code: pointer to the response code from the same command
+ *
+ * Pass component table using ACI command (0x070B). This command is equivalent
+ * to the reception of a PLDM FW Update PassComponentTable cmd.
+ * This command should be sent once per component. It can only be sent
+ * after the Set Package Data cmd and before the actual update. FW assumes
+ * these commands will keep coming until the TransferFlag is set to End or
+ * StartAndEnd.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code)
+{
+ struct ixgbe_aci_cmd_nvm_pass_comp_tbl *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!data || !comp_response || !comp_response_code)
+ return -EINVAL;
+
+ cmd = &desc.params.pass_comp_tbl;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_pass_component_tbl);
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ cmd->transfer_flag = transfer_flag;
+ err = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ if (!err) {
+ *comp_response = cmd->component_response;
+ *comp_response_code = cmd->component_response_code;
+ }
+
+ return err;
+}
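Taken together, the comments above pin down an ordering: Set Package Data opens the flow, Pass Component Table runs once per component until a transfer flag of End or StartAndEnd, and only then does the actual erase/write/activate happen. A hedged sketch of that sequence, using only functions from this patch (buffers, lengths and the transfer-flag value are caller-supplied placeholders; error unwinding and the erase/write steps are elided):

/* Sketch only: shows command ordering, not a complete update path. */
static int example_update_flow(struct ixgbe_hw *hw, u8 *pkg, u16 pkg_len,
			       u8 *comp_tbl, u16 comp_len, u8 transfer_flag)
{
	u8 resp, resp_code;
	int err;

	/* 1. First command of the flow: hand the package data to FW. */
	err = ixgbe_nvm_set_pkg_data(hw, false, pkg, pkg_len);
	if (err)
		return err;

	/* 2. Once per component; the final call's transfer_flag must be
	 * End or StartAndEnd so FW stops expecting more tables.
	 */
	err = ixgbe_nvm_pass_component_tbl(hw, comp_tbl, comp_len,
					   transfer_flag, &resp, &resp_code);
	if (err)
		return err;

	/* 3. ...then ixgbe_aci_erase_nvm()/ixgbe_aci_update_nvm(),
	 * ixgbe_nvm_write_activate(), and optionally
	 * ixgbe_aci_nvm_update_empr() to force the EMP reset.
	 */
	return 0;
}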
+
+/**
* ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -2485,7 +3694,7 @@ int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
if (err)
return err;
- err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD,
&tmp_checksum);
ixgbe_release_nvm(hw);
@@ -2580,9 +3789,129 @@ reset_hw_out:
return err;
}
+/**
+ * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Find the requested sub-module TLV type in the Preserved Field
+ * Area (PFA) and return the TLV pointer and length. The caller can
+ * then use these to read the variable-length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ int err;
+
+ err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr);
+ if (err)
+ return err;
+
+ err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len);
+ if (err)
+ return err;
+
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (err)
+ break;
+
+ /* Read TLV length */
+ err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len);
+ if (err)
+ break;
+
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return -EIO;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length).
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return -ENODATA;
+}
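A userspace walk of the same TLV layout, over an invented word-addressed PFA image: the word at the PFA pointer holds the area length, TLVs start at the next word, and each step advances by the value length plus two words for the type and length fields:

#include <stdio.h>

int main(void)
{
	/* invented PFA image: [area len][type,len,value...]... */
	unsigned short sr[] = { 6, 0xAA, 1, 0x1234, 0xBB, 0 };
	unsigned int pfa_ptr = 0;
	unsigned int next = pfa_ptr + 1, end = pfa_ptr + sr[pfa_ptr];

	while (next < end) {
		unsigned short type = sr[next], len = sr[next + 1];

		printf("TLV type 0x%x, len %u at word %u\n", type, len, next);
		next += len + 2;	/* skip value plus type/len words */
	}
	return 0;
}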
+
+/**
+ * ixgbe_read_pba_string_e610 - Read PBA string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Read the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ int err;
+
+ *pba_num = '\0';
+
+ err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ IXGBE_E610_SR_PBA_BLOCK_PTR);
+ if (err)
+ return err;
+
+ /* pba_size is the next word */
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size);
+ if (err)
+ return err;
+
+ if (pba_tlv_len < pba_size)
+ return -EINVAL;
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size).
+ */
+ pba_size--;
+
+ if (pba_num_size < (((u32)pba_size * 2) + 1))
+ return -EINVAL;
+
+ for (u16 i = 0; i < pba_size; i++) {
+ err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (err)
+ return err;
+
+ pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK,
+ pba_word);
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+
+ pba_num[(pba_size * 2)] = '\0';
+
+ return err;
+}
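The word-to-string unpacking above, standalone: each Shadow RAM word carries two ASCII characters, high byte first (the FIELD_GET with IXGBE_E610_SR_PBA_BLOCK_MASK above is assumed to extract that high byte). The sample words are invented:

#include <stdio.h>

int main(void)
{
	unsigned short words[] = { 0x3130, 0x3241, 0x2D33 };	/* invented */
	char pba[7];

	for (unsigned int i = 0; i < 3; i++) {
		pba[i * 2] = (char)(words[i] >> 8);	/* high byte first */
		pba[i * 2 + 1] = (char)(words[i] & 0xFF);
	}
	pba[6] = '\0';

	printf("%s\n", pba);	/* prints "102A-3" */
	return 0;
}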
+
static const struct ixgbe_mac_operations mac_ops_e610 = {
.init_hw = ixgbe_init_hw_generic,
- .start_hw = ixgbe_start_hw_X540,
+ .start_hw = ixgbe_start_hw_e610,
.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic,
.enable_rx_dma = ixgbe_enable_rx_dma_generic,
.get_mac_addr = ixgbe_get_mac_addr_generic,
@@ -2621,8 +3950,12 @@ static const struct ixgbe_mac_operations mac_ops_e610 = {
.led_off = ixgbe_led_off_generic,
.init_led_link_act = ixgbe_init_led_link_act_generic,
.reset_hw = ixgbe_reset_hw_e610,
+ .get_fw_ver = ixgbe_aci_get_fw_ver,
.get_media_type = ixgbe_get_media_type_e610,
.setup_link = ixgbe_setup_link_e610,
+ .fw_recovery_mode = ixgbe_fw_recovery_mode_e610,
+ .fw_rollback_mode = ixgbe_fw_rollback_mode_e610,
+ .get_nvm_ver = ixgbe_get_active_nvm_ver,
.get_link_capabilities = ixgbe_get_link_capabilities_e610,
.get_bus_info = ixgbe_get_bus_info_generic,
.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540,
@@ -2647,6 +3980,8 @@ static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
.read = ixgbe_read_ee_aci_e610,
.read_buffer = ixgbe_read_ee_aci_buffer_e610,
.validate_checksum = ixgbe_validate_eeprom_checksum_e610,
+ .read_pba_string = ixgbe_read_pba_string_e610,
+ .init_params = ixgbe_init_eeprom_params_e610,
};
const struct ixgbe_info ixgbe_e610_info = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
index ba8c06b73810..bb31d65bd1c8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -36,6 +36,7 @@ int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
struct ixgbe_link_status *link);
int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait);
@@ -67,6 +68,11 @@ int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command,
bool read_shadow_ram);
int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
+ struct ixgbe_orom_info *orom);
+int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist);
int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
u8 *data, bool read_shadow_ram);
@@ -77,5 +83,18 @@ int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val);
int ixgbe_reset_hw_e610(struct ixgbe_hw *hw);
+int ixgbe_get_flash_data(struct ixgbe_hw *hw);
+int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw);
+int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
+ u8 *data, u16 length);
+int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
+ u8 transfer_flag, u8 *comp_response,
+ u8 *comp_response_code);
+int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index f03925c1f521..d8a919ab7027 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -213,7 +213,7 @@ static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
static int ixgbe_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
bool autoneg = false;
@@ -458,7 +458,7 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
static int ixgbe_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
int err = 0;
@@ -535,7 +535,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
static void ixgbe_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw_stats *hwstats = &adapter->stats;
stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
@@ -545,7 +545,7 @@ static void ixgbe_get_pause_stats(struct net_device *netdev,
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (ixgbe_device_supports_autoneg_fc(hw) &&
@@ -564,10 +564,26 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
}
+static void ixgbe_set_pauseparam_finalize(struct net_device *netdev,
+ struct ixgbe_fc_info *fc)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* If the flow control settings changed, apply them and use the new
+ * autoneg setting.
+ */
+ if (memcmp(fc, &hw->fc, sizeof(*fc))) {
+ hw->fc = *fc;
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+ }
+}
+
static int ixgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fc_info fc = hw->fc;
@@ -592,27 +608,52 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
else
fc.requested_mode = ixgbe_fc_none;
- /* if the thing changed then we'll update and use new autoneg */
- if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
- hw->fc = fc;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
+ return 0;
+}
+
+static int ixgbe_set_pauseparam_e610(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fc_info fc = hw->fc;
+
+ if (!ixgbe_device_supports_autoneg_fc(hw))
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg == AUTONEG_DISABLE) {
+ netdev_info(netdev,
+ "Cannot disable autonegotiation on this device.\n");
+ return -EOPNOTSUPP;
}
+ fc.disable_fc_autoneg = false;
+
+ if (pause->rx_pause && pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_full;
+ else if (pause->rx_pause)
+ fc.requested_mode = ixgbe_fc_rx_pause;
+ else if (pause->tx_pause)
+ fc.requested_mode = ixgbe_fc_tx_pause;
+ else
+ fc.requested_mode = ixgbe_fc_none;
+
+ ixgbe_set_pauseparam_finalize(netdev, &fc);
+
return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->msg_enable;
}
static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->msg_enable = data;
}
@@ -627,7 +668,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
static void ixgbe_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u8 i;
@@ -994,14 +1035,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
int first_word, last_word, eeprom_len;
@@ -1037,7 +1078,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
static int ixgbe_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 *eeprom_buff;
void *ptr;
@@ -1104,10 +1145,22 @@ err:
return ret_val;
}
+void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbe_get_flash_data(hw);
+ ixgbe_set_fw_version_e610(adapter);
+}
+
static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ /* Refresh the info for E610 in case the FW reloads at runtime. */
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_refresh_fw_version(adapter);
strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
@@ -1161,7 +1214,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
@@ -1176,7 +1229,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *temp_ring;
int i, j, err = 0;
u32 new_rx_count, new_tx_count;
@@ -1336,7 +1389,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *net_stats;
unsigned int start;
@@ -1710,7 +1763,7 @@ static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
@@ -2183,7 +2236,7 @@ out:
static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
bool if_running = netif_running(netdev);
if (ixgbe_removed(adapter->hw.hw_addr)) {
@@ -2306,7 +2359,7 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
static void ixgbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
WAKE_BCAST | WAKE_MAGIC;
@@ -2328,7 +2381,7 @@ static void ixgbe_get_wol(struct net_device *netdev,
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
WAKE_FILTER))
@@ -2353,9 +2406,53 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
+static int ixgbe_set_wol_acpi(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 grc;
+
+ if (ixgbe_wol_exclusion(adapter, wol))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* disable APM wakeup */
+ grc = IXGBE_READ_REG(hw, IXGBE_GRC_X550EM_a);
+ grc &= ~IXGBE_GRC_APME;
+ IXGBE_WRITE_REG(hw, IXGBE_GRC_X550EM_a, grc);
+
+ /* erase existing filters */
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IXGBE_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IXGBE_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IXGBE_WUFC_BC;
+
+ IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_PME_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wol);
+
+ hw->wol_enabled = adapter->wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+static int ixgbe_set_wol_e610(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ if (wol->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
+ return ixgbe_set_wol_acpi(netdev, wol);
+ else
+ return ixgbe_set_wol(netdev, wol);
+}
+
static int ixgbe_nway_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -2366,7 +2463,7 @@ static int ixgbe_nway_reset(struct net_device *netdev)
static int ixgbe_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
@@ -2394,12 +2491,32 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 0;
}
+static int ixgbe_set_phys_id_e610(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+ bool led_active;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ led_active = true;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ led_active = false;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ixgbe_aci_set_port_id_led(&adapter->hw, !led_active);
+}
+
static int ixgbe_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* only valid if in constant ITR mode */
if (adapter->rx_itr_setting <= 1)
@@ -2455,7 +2572,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_q_vector *q_vector;
int i;
u16 tx_itr_param, rx_itr_param, tx_itr_prev;
@@ -2681,7 +2798,7 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
@@ -3069,7 +3186,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
@@ -3096,7 +3213,7 @@ static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
return ixgbe_rss_indir_tbl_entries(adapter);
}
@@ -3116,7 +3233,7 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
static int ixgbe_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -3134,7 +3251,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
@@ -3176,7 +3293,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
static int ixgbe_get_ts_info(struct net_device *dev,
struct kernel_ethtool_ts_info *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* we always support timestamping disabled */
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
@@ -3252,7 +3369,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
static void ixgbe_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
/* report maximum channels */
ch->max_combined = ixgbe_max_channels(adapter);
@@ -3289,7 +3406,7 @@ static void ixgbe_get_channels(struct net_device *dev,
static int ixgbe_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
unsigned int count = ch->combined_count;
u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
@@ -3327,7 +3444,7 @@ static int ixgbe_set_channels(struct net_device *dev,
static int ixgbe_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -3373,7 +3490,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
int status = -EFAULT;
u8 databyte = 0xFF;
@@ -3469,7 +3586,7 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
@@ -3483,7 +3600,7 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct ethtool_keee eee_data;
int ret_val;
@@ -3538,7 +3655,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
u32 priv_flags = 0;
if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
@@ -3555,7 +3672,7 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
unsigned int flags2 = adapter->flags2;
unsigned int i;
@@ -3638,7 +3755,57 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_link_ksettings = ixgbe_set_link_ksettings,
};
+static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = ixgbe_get_drvinfo,
+ .get_regs_len = ixgbe_get_regs_len,
+ .get_regs = ixgbe_get_regs,
+ .get_wol = ixgbe_get_wol,
+ .set_wol = ixgbe_set_wol_e610,
+ .nway_reset = ixgbe_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = ixgbe_get_eeprom_len,
+ .get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
+ .get_ringparam = ixgbe_get_ringparam,
+ .set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
+ .get_pauseparam = ixgbe_get_pauseparam,
+ .set_pauseparam = ixgbe_set_pauseparam_e610,
+ .get_msglevel = ixgbe_get_msglevel,
+ .set_msglevel = ixgbe_set_msglevel,
+ .self_test = ixgbe_diag_test,
+ .get_strings = ixgbe_get_strings,
+ .set_phys_id = ixgbe_set_phys_id_e610,
+ .get_sset_count = ixgbe_get_sset_count,
+ .get_ethtool_stats = ixgbe_get_ethtool_stats,
+ .get_coalesce = ixgbe_get_coalesce,
+ .set_coalesce = ixgbe_set_coalesce,
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
+ .get_rxfh_indir_size = ixgbe_rss_indir_size,
+ .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
+ .get_rxfh = ixgbe_get_rxfh,
+ .set_rxfh = ixgbe_set_rxfh,
+ .get_eee = ixgbe_get_eee,
+ .set_eee = ixgbe_set_eee,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
+ .get_priv_flags = ixgbe_get_priv_flags,
+ .set_priv_flags = ixgbe_set_priv_flags,
+ .get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
+ .get_link_ksettings = ixgbe_get_link_ksettings,
+ .set_link_ksettings = ixgbe_set_link_ksettings,
+};
+
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &ixgbe_ethtool_ops;
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ netdev->ethtool_ops = &ixgbe_ethtool_ops_e610;
+ else
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
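
The hunks above and below replace netdev_priv() with ixgbe_from_netdev() throughout the driver. A minimal sketch of the accessor this presumably relies on, inferred from the probe changes later in this patch (the real definition lives in ixgbe.h, outside this diff):

#include <linux/netdevice.h>

/* sketch: the netdev private area now holds only a pointer back to the
 * adapter, which is allocated together with the devlink instance
 */
struct ixgbe_netdevice_priv {
	struct ixgbe_adapter *adapter;
};

static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
{
	struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);

	return priv->adapter;
}

This is also why alloc_etherdev_mq() in ixgbe_probe() below switches from sizeof(struct ixgbe_adapter) to sizeof(*netdev_priv_wrapper).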
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 955dced844a9..7dcf6ecd157b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -56,7 +56,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
if (xid >= netdev->fcoe_ddp_xid)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
@@ -153,7 +153,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
if (!netdev || !sgl)
return 0;
- adapter = netdev_priv(netdev);
+ adapter = ixgbe_from_netdev(netdev);
if (xid >= netdev->fcoe_ddp_xid) {
e_warn(drv, "xid=0x%x out-of-range\n", xid);
return 0;
@@ -834,7 +834,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
atomic_inc(&fcoe->refcnt);
@@ -881,7 +881,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
return -EINVAL;
@@ -927,7 +927,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
u16 prefix = 0xffff;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
switch (type) {
@@ -967,7 +967,7 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u64 dsn;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
new file mode 100644
index 000000000000..49d3b66add7e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2025 Intel Corporation. */
+
+#include <linux/crc32.h>
+#include <linux/pldmfw.h>
+#include <linux/uuid.h>
+
+#include "ixgbe.h"
+#include "ixgbe_fw_update.h"
+
+struct ixgbe_fwu_priv {
+ struct pldmfw context;
+
+ struct ixgbe_adapter *adapter;
+ struct netlink_ext_ack *extack;
+
+ /* Track which NVM banks to activate at the end of the update */
+ u8 activate_flags;
+ bool emp_reset_available;
+};
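
Every pldmfw callback below recovers this wrapper from the embedded context member with an open-coded container_of(). A hypothetical helper showing the pattern (sketch only, not part of the patch):

#include <linux/container_of.h>

static inline struct ixgbe_fwu_priv *to_ixgbe_fwu_priv(struct pldmfw *ctx)
{
	/* ctx points at the context member embedded in ixgbe_fwu_priv */
	return container_of(ctx, struct ixgbe_fwu_priv, context);
}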
+
+/**
+ * ixgbe_send_package_data - Send record package data to firmware
+ * @context: PLDM fw update structure
+ * @data: pointer to the package data
+ * @length: length of the package data
+ *
+ * Send a copy of the package data associated with the PLDM record matching
+ * this device to the firmware.
+ *
+ * Note that this function sends an AdminQ command that will fail unless the
+ * NVM resource has been acquired.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ixgbe_send_package_data(struct pldmfw *context,
+ const u8 *data, u16 length)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *package_data;
+ int err;
+
+ package_data = kmemdup(data, length, GFP_KERNEL);
+ if (!package_data)
+ return -ENOMEM;
+
+ err = ixgbe_nvm_set_pkg_data(hw, false, package_data, length);
+
+ kfree(package_data);
+
+ return err;
+}
+
+/**
+ * ixgbe_check_component_response - Report firmware response to a component
+ * @adapter: device private data structure
+ * @response: indicates whether this component can be updated
+ * @code: code indicating reason for response
+ * @extack: netlink extended ACK structure
+ *
+ * Check whether firmware indicates this component can be updated. Report
+ * a suitable error message over the netlink extended ACK if the component
+ * cannot be updated.
+ *
+ * Return: 0 if the component can be updated, or -ECANCELED if the
+ * firmware indicates the component cannot be updated.
+ */
+static int ixgbe_check_component_response(struct ixgbe_adapter *adapter,
+ u8 response, u8 code,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (response) {
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED:
+ /* Firmware indicated this update is good to proceed. */
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware recommends not updating, as it may result in a downgrade. Continuing anyways");
+ return 0;
+ case IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
+ NL_SET_ERR_MSG_MOD(extack, "Firmware has rejected updating.");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (hw->mac.ops.fw_recovery_mode &&
+ hw->mac.ops.fw_recovery_mode(hw))
+ return 0;
+ break;
+ }
+
+ switch (code) {
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is lower than running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component comparison stamp is invalid");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component table conflict occurred");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component not supported");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE:
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete component image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is identical to running image");
+ break;
+ case IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Component version is lower than the running image");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Received unexpected response code from firmware");
+ break;
+ }
+
+ return -ECANCELED;
+}
+
+/**
+ * ixgbe_send_component_table - Send PLDM component table to firmware
+ * @context: PLDM fw update structure
+ * @component: the component to process
+ * @transfer_flag: relative transfer order of this component
+ *
+ * Read relevant data from the component and forward it to the device
+ * firmware. Check the response to determine if the firmware indicates that
+ * the update can proceed.
+ *
+ * This function sends ACI commands related to the NVM, and assumes that
+ * the NVM resource has been acquired.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_send_component_table(struct pldmfw *context,
+ struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_aci_cmd_nvm_comp_tbl *comp_tbl;
+ u8 comp_response, comp_response_code;
+ struct ixgbe_hw *hw = &adapter->hw;
+ size_t length;
+ int err;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ case NVM_COMP_ID_NVM:
+ case NVM_COMP_ID_NETLIST:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to update due to unknown firmware component");
+ return -EOPNOTSUPP;
+ }
+
+ length = struct_size(comp_tbl, cvs, component->version_len);
+ comp_tbl = kzalloc(length, GFP_KERNEL);
+ if (!comp_tbl)
+ return -ENOMEM;
+
+ comp_tbl->comp_class = cpu_to_le16(component->classification);
+ comp_tbl->comp_id = cpu_to_le16(component->identifier);
+ comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE;
+ comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp);
+ comp_tbl->cvs_type = component->version_type;
+ comp_tbl->cvs_len = component->version_len;
+
+ memcpy(comp_tbl->cvs, component->version_string,
+ component->version_len);
+
+ err = ixgbe_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
+ transfer_flag, &comp_response,
+ &comp_response_code);
+
+ kfree(comp_tbl);
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transfer component table to firmware");
+ return -EIO;
+ }
+
+ return ixgbe_check_component_response(adapter,
+ comp_response,
+ comp_response_code, extack);
+}
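
The component table above ends in a flexible array member (cvs[]), so the allocation sizes header plus payload in one step with struct_size(), which also checks for overflow. A self-contained sketch of the same pattern using a hypothetical struct:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_comp_tbl {
	__le16 comp_id;
	u8 cvs_len;
	u8 cvs[];			/* flexible array member */
};

static struct demo_comp_tbl *demo_comp_tbl_alloc(u8 version_len)
{
	struct demo_comp_tbl *tbl;

	/* sizeof(*tbl) + version_len * sizeof(tbl->cvs[0]), overflow-checked */
	tbl = kzalloc(struct_size(tbl, cvs, version_len), GFP_KERNEL);
	if (tbl)
		tbl->cvs_len = version_len;
	return tbl;
}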
+
+/**
+ * ixgbe_write_one_nvm_block - Write an NVM block and await completion response
+ * @adapter: the PF data structure
+ * @module: the module to write to
+ * @offset: offset in bytes
+ * @block_size: size of the block to write, up to 4k
+ * @block: pointer to block of data to write
+ * @last_cmd: whether this is the last command
+ * @extack: netlink extended ACK structure
+ *
+ * Write a block of data to a flash module, and await the completion
+ * response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * On successful return, reset level indicates the device reset required to
+ * complete the update.
+ *
+ * 0 - IXGBE_ACI_NVM_POR_FLAG - A full power on is required
+ * 1 - IXGBE_ACI_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - IXGBE_ACI_NVM_EMPR_FLAG - An EMP reset is required
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_one_nvm_block(struct ixgbe_adapter *adapter,
+ u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ return ixgbe_aci_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0);
+}
+
+/**
+ * ixgbe_write_nvm_module - Write data to an NVM module
+ * @adapter: the PF driver structure
+ * @module: the module id to program
+ * @component: the name of the component being updated
+ * @image: buffer of image data to write to the NVM
+ * @length: length of the buffer
+ * @extack: netlink extended ACK structure
+ *
+ * Loop over the data for a given NVM module and program it in 4 KB
+ * blocks, notifying the devlink core of progress after each block is
+ * programmed.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_write_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component, const u8 *image,
+ u32 length,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ u32 offset = 0;
+ bool last_cmd;
+ u8 *block;
+ int err;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, 0, length);
+
+ block = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ do {
+ u32 block_size;
+
+ block_size = min_t(u32, IXGBE_ACI_MAX_BUFFER_SIZE,
+ length - offset);
+ last_cmd = !(offset + block_size < length);
+
+ memcpy(block, image + offset, block_size);
+
+ err = ixgbe_write_one_nvm_block(adapter, module, offset,
+ block_size, block, last_cmd,
+ extack);
+ if (err)
+ break;
+
+ offset += block_size;
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component, offset, length);
+ } while (!last_cmd);
+
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ component, length, length);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ component, length, length);
+
+ kfree(block);
+
+ return err;
+}
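
The loop above walks the image in blocks of at most IXGBE_ACI_MAX_BUFFER_SIZE and flags only the final block as last_cmd. A standalone sketch of that invariant (hypothetical helper, for illustration only):

#include <linux/minmax.h>
#include <linux/types.h>

static u32 next_block_size(u32 offset, u32 length, u32 max_block,
			   bool *last_cmd)
{
	u32 block_size = min_t(u32, max_block, length - offset);

	/* true only when this block reaches the end of the image */
	*last_cmd = (offset + block_size == length);
	return block_size;
}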
+
+/* Length in seconds to wait before timing out when erasing a flash module.
+ * Yes, erasing really can take minutes to complete.
+ */
+#define IXGBE_FW_ERASE_TIMEOUT 300
+
+/**
+ * ixgbe_erase_nvm_module - Erase an NVM module and await firmware completion
+ * @adapter: the PF data structure
+ * @module: the module to erase
+ * @component: name of the component being updated
+ * @extack: netlink extended ACK structure
+ *
+ * Erase the inactive NVM bank associated with this module, and await
+ * a completion response message from firmware.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_erase_nvm_module(struct ixgbe_adapter *adapter, u16 module,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ devlink_flash_update_timeout_notify(devlink, "Erasing", component,
+ IXGBE_FW_ERASE_TIMEOUT);
+
+ err = ixgbe_aci_erase_nvm(hw, module);
+ if (err)
+ devlink_flash_update_status_notify(devlink, "Erasing failed",
+ component, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Erasing done",
+ component, 0, 0);
+
+ return err;
+}
+
+/**
+ * ixgbe_switch_flash_banks - Tell firmware to switch NVM banks
+ * @adapter: Pointer to the PF data structure
+ * @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
+ * @extack: netlink extended ACK structure
+ *
+ * Notify firmware to activate the newly written flash banks, and wait for the
+ * firmware response.
+ *
+ * Return: 0 on success or an error code on failure.
+ */
+static int ixgbe_switch_flash_banks(struct ixgbe_adapter *adapter,
+ u8 activate_flags,
+ bool *emp_reset_available,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 response_flags;
+ int err;
+
+ err = ixgbe_nvm_write_activate(hw, activate_flags, &response_flags);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to switch active flash banks");
+ return err;
+ }
+
+ if (emp_reset_available) {
+ if (hw->dev_caps.common_cap.reset_restrict_support)
+ *emp_reset_available =
+ response_flags & IXGBE_ACI_NVM_EMPR_ENA;
+ else
+ *emp_reset_available = true;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_flash_component - Flash a component of the NVM
+ * @context: PLDM fw update structure
+ * @component: the component table to program
+ *
+ * Program the flash contents for a given component. First, determine the
+ * module id. Then, erase the secondary bank for this module. Finally, write
+ * the contents of the component to the NVM.
+ *
+ * Note this function assumes the caller has acquired the NVM resource.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct netlink_ext_ack *extack = priv->extack;
+ struct ixgbe_adapter *adapter = priv->adapter;
+ const char *name;
+ u16 module;
+ int err;
+ u8 flag;
+
+ switch (component->identifier) {
+ case NVM_COMP_ID_OROM:
+ module = IXGBE_E610_SR_1ST_OROM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ name = "fw.undi";
+ break;
+ case NVM_COMP_ID_NVM:
+ module = IXGBE_E610_SR_1ST_NVM_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ name = "fw.mgmt";
+ break;
+ case NVM_COMP_ID_NETLIST:
+ module = IXGBE_E610_SR_NETLIST_BANK_PTR;
+ flag = IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ name = "fw.netlist";
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Mark this component for activating at the end. */
+ priv->activate_flags |= flag;
+
+ err = ixgbe_erase_nvm_module(adapter, module, name, extack);
+ if (err)
+ return err;
+
+ return ixgbe_write_nvm_module(adapter, module, name,
+ component->component_data,
+ component->component_size, extack);
+}
+
+/**
+ * ixgbe_finalize_update - Perform last steps to complete device update
+ * @context: PLDM fw update structure
+ *
+ * Called as the last step of the update process. Complete the update by
+ * telling the firmware to switch active banks, and performing a reset if
+ * configured.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ixgbe_finalize_update(struct pldmfw *context)
+{
+ struct ixgbe_fwu_priv *priv = container_of(context,
+ struct ixgbe_fwu_priv,
+ context);
+ struct ixgbe_adapter *adapter = priv->adapter;
+ struct netlink_ext_ack *extack = priv->extack;
+ struct devlink *devlink = adapter->devlink;
+ int err;
+
+ /* Finally, notify firmware to activate the written NVM banks */
+ err = ixgbe_switch_flash_banks(adapter, priv->activate_flags,
+ &priv->emp_reset_available, extack);
+ if (err)
+ return err;
+
+ adapter->fw_emp_reset_disabled = !priv->emp_reset_available;
+
+ if (!adapter->fw_emp_reset_disabled)
+ devlink_flash_update_status_notify(devlink,
+ "Suggested is to activate new firmware by devlink reload, if it doesn't work then a power cycle is required",
+ NULL, 0, 0);
+
+ return 0;
+}
+
+static const struct pldmfw_ops ixgbe_fwu_ops_e610 = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ixgbe_send_package_data,
+ .send_component_table = &ixgbe_send_component_table,
+ .flash_component = &ixgbe_flash_component,
+ .finalize_update = &ixgbe_finalize_update,
+};
+
+/**
+ * ixgbe_get_pending_updates - Check whether the device has pending updates
+ * @adapter: the PF driver structure
+ * @pending: on return, bitmap of updates pending
+ * @extack: Netlink extended ACK
+ *
+ * Check if the device has any pending updates on any flash components.
+ *
+ * Return: 0 on success, or a negative error code on failure. On success,
+ * @pending is filled with the bitmap of pending updates.
+ */
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_hw_dev_caps *dev_caps;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
+ if (!dev_caps)
+ return -ENOMEM;
+
+ err = ixgbe_discover_dev_caps(hw, dev_caps);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read device capabilities");
+ kfree(dev_caps);
+ return -EIO;
+ }
+
+ *pending = 0;
+
+ if (dev_caps->common_cap.nvm_update_pending_nvm)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+
+ if (dev_caps->common_cap.nvm_update_pending_orom)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+
+ if (dev_caps->common_cap.nvm_update_pending_netlist)
+ *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+
+ kfree(dev_caps);
+
+ return 0;
+}
+
+/**
+ * ixgbe_cancel_pending_update - Cancel any pending update for a component
+ * @adapter: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Cancel any pending update for the specified component. If component is
+ * NULL, all device updates will be canceled.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ixgbe_cancel_pending_update(struct ixgbe_adapter *adapter,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = adapter->devlink;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 pending;
+ int err;
+
+ err = ixgbe_get_pending_updates(adapter, &pending, extack);
+ if (err)
+ return err;
+
+ /* If the flash_update request is for a specific component, ignore all
+ * of the other components.
+ */
+ if (component) {
+ if (strcmp(component, "fw.mgmt") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NVM;
+ else if (strcmp(component, "fw.undi") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_OROM;
+ else if (strcmp(component, "fw.netlist") == 0)
+ pending &= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST;
+ else
+ return -EINVAL;
+ }
+
+ /* There is no previous pending update, so this request may continue */
+ if (!pending)
+ return 0;
+
+ /* In order to allow overwriting a previous pending update, notify
+ * firmware to cancel that update by issuing the appropriate command.
+ */
+ devlink_flash_update_status_notify(devlink,
+ "Canceling previous pending update",
+ component, 0, 0);
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ pending |= IXGBE_ACI_NVM_REVERT_LAST_ACTIV;
+ err = ixgbe_switch_flash_banks(adapter, pending, NULL, extack);
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_flash_pldm_image - Write a PLDM-formatted firmware image to the device
+ * @devlink: pointer to devlink associated with the device to update
+ * @params: devlink flash update parameters
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid
+ * PLDM-formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ixgbe_adapter *adapter = devlink_priv(devlink);
+ struct device *dev = &adapter->pdev->dev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fwu_priv priv;
+ u8 preservation;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ switch (params->overwrite_mask) {
+ case 0:
+ /* preserve all settings and identifiers */
+ preservation = IXGBE_ACI_NVM_PRESERVE_ALL;
+ break;
+ case DEVLINK_FLASH_OVERWRITE_SETTINGS:
+ /* Overwrite settings, but preserve vital information such as
+ * device identifiers.
+ */
+ preservation = IXGBE_ACI_NVM_PRESERVE_SELECTED;
+ break;
+ case (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS):
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = IXGBE_ACI_NVM_NO_PRESERVATION;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested overwrite mask is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ /* Device capabilities cannot be read in recovery mode, so a missing
+ * nvm_unified_update bit must not be treated as an error there.
+ */
+ if (!hw->dev_caps.common_cap.nvm_unified_update &&
+ (hw->mac.ops.fw_recovery_mode &&
+ !hw->mac.ops.fw_recovery_mode(hw))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ixgbe_fwu_ops_e610;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.adapter = adapter;
+ priv.activate_flags = preservation;
+
+ devlink_flash_update_status_notify(devlink,
+ "Preparing to flash", NULL, 0, 0);
+
+ err = ixgbe_cancel_pending_update(adapter, NULL, extack);
+ if (err)
+ return err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to acquire device flash lock");
+ return -EIO;
+ }
+
+ err = pldmfw_flash_image(&priv.context, params->fw);
+ if (err == -ENOENT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image has no record matching this device");
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to flash PLDM image");
+ }
+
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
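
ixgbe_flash_pldm_image() already has the signature of the devlink flash_update op, so it can presumably be plugged straight into the driver's devlink_ops table (the actual wiring lives in devlink/devlink.c, outside this diff; names below are assumptions):

#include <net/devlink.h>
#include "ixgbe_fw_update.h"

/* sketch: expose the PLDM flash flow as the devlink flash_update op */
static const struct devlink_ops ixgbe_devlink_ops_sketch = {
	.flash_update = ixgbe_flash_pldm_image,
};

Userspace would then drive the whole update flow above with a single devlink dev flash request against the PF.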
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
new file mode 100644
index 000000000000..abdd708c93df
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2025 Intel Corporation. */
+
+#ifndef _IXGBE_FW_UPDATE_H_
+#define _IXGBE_FW_UPDATE_H_
+
+int ixgbe_flash_pldm_image(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending,
+ struct netlink_ext_ack *extack);
+#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 07ea1954a276..d1f4073b36f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -9,7 +9,7 @@
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs);
/**
* ixgbe_ipsec_set_tx_sa - set the Tx SA registers
@@ -321,7 +321,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (r->used) {
if (r->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(r->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, r->xs);
else
ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
r->key, r->salt,
@@ -330,7 +330,7 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
if (t->used) {
if (t->mode & IXGBE_RXTXMOD_VF)
- ixgbe_ipsec_del_sa(t->xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, t->xs);
else
ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
}
@@ -417,6 +417,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
/**
* ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -424,10 +425,10 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbe_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -473,12 +474,13 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
+ * @dev: pointer to net device
* @xs: pointer to transformer state struct
**/
-static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
+static int ixgbe_ipsec_check_mgmt_ip(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
u32 mfval, manc, reg;
int num_filters = 4;
@@ -556,14 +558,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
/**
* ixgbe_ipsec_add_sa - program device with a security association
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbe_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
int checked, match, first;
@@ -581,7 +584,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
return -EINVAL;
}
- if (ixgbe_ipsec_check_mgmt_ip(xs)) {
+ if (ixgbe_ipsec_check_mgmt_ip(dev, xs)) {
NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
return -EINVAL;
}
@@ -615,7 +618,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, rsa.key, &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -724,7 +727,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbe_ipsec_parse_proto_keys(dev, xs, tsa.key, &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -752,12 +755,12 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbe_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbe_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct ixgbe_hw *hw = &adapter->hw;
u32 zerobuf[4] = {0, 0, 0, 0};
@@ -841,7 +844,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->rx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->rx_tbl[i].xs);
}
/* search tx sa table */
@@ -850,7 +854,8 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
continue;
if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
ipsec->tx_tbl[i].vf == vf)
- ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
+ ixgbe_ipsec_del_sa(adapter->netdev,
+ ipsec->tx_tbl[i].xs);
}
}
@@ -930,7 +935,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
- err = ixgbe_ipsec_add_sa(xs, NULL);
+ err = ixgbe_ipsec_add_sa(adapter->netdev, xs, NULL);
if (err)
goto err_aead;
@@ -1034,7 +1039,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
xs = ipsec->tx_tbl[sa_idx].xs;
}
- ixgbe_ipsec_del_sa(xs);
+ ixgbe_ipsec_del_sa(adapter->netdev, xs);
/* remove the xs that was made-up in the add request */
kfree_sensitive(xs);
@@ -1052,7 +1057,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
{
- struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(tx_ring->netdev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
struct sec_path *sp;
@@ -1142,7 +1147,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(rx_ring->netdev);
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a2718218963e..03d31e5b131d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -49,6 +49,7 @@
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
+#include "devlink/devlink.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -1095,7 +1096,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
static int ixgbe_tx_maxrate(struct net_device *netdev,
int queue_index, u32 maxrate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 bcnrc_val = ixgbe_link_mbps(adapter);
@@ -4678,7 +4679,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
@@ -4737,7 +4738,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
@@ -4962,7 +4963,7 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
**/
static int ixgbe_write_mc_addr_list(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!netif_running(netdev))
@@ -5138,7 +5139,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret;
ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
@@ -5148,7 +5149,7 @@ static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
@@ -5166,7 +5167,7 @@ static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
**/
void ixgbe_set_rx_mode(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
netdev_features_t features = netdev->features;
@@ -5268,7 +5269,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
struct udp_tunnel_info ti;
@@ -6600,7 +6601,7 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
**/
static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* Do the reset outside of interrupt context */
ixgbe_tx_timeout_reset(adapter);
@@ -6849,7 +6850,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
/* initialize eeprom parameters */
- if (ixgbe_init_eeprom_params_generic(hw)) {
+ if (hw->eeprom.ops.init_params(hw)) {
e_dev_err("EEPROM initialization failed\n");
return -EIO;
}
@@ -7165,7 +7166,7 @@ static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
**/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (ixgbe_enabled_xdp_adapter(adapter)) {
int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
@@ -7212,7 +7213,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
**/
int ixgbe_open(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
@@ -7316,7 +7317,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
**/
int ixgbe_close(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
ixgbe_ptp_stop(adapter);
@@ -8364,6 +8365,34 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
rtnl_unlock();
}
+static int ixgbe_check_fw_api_mismatch(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return 0;
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ return 0;
+
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ e_dev_err("The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ return -EOPNOTSUPP;
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > IXGBE_FW_API_VER_MINOR + IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - IXGBE_FW_API_VER_DIFF_ALLOWED) {
+ e_dev_info("The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ adapter->flags2 |= IXGBE_FLAG2_API_MISMATCH;
+ }
+
+ return 0;
+}
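
The checks above define a tolerance window around the API version the driver was built against: a newer major version is fatal, while minor-version skew beyond IXGBE_FW_API_VER_DIFF_ALLOWED in either direction only warns. A self-contained sketch with hypothetical constants:

#include <linux/types.h>

#define SKETCH_API_MAJOR	1	/* hypothetical driver-side version */
#define SKETCH_API_MINOR	7
#define SKETCH_API_DIFF		2	/* allowed minor-version skew */

enum sketch_api_verdict { API_OK, API_WARN_NEWER, API_WARN_OLDER, API_STOP };

static enum sketch_api_verdict sketch_check_api(u16 maj, u16 min)
{
	if (maj > SKETCH_API_MAJOR)
		return API_STOP;	/* NVM too new, stop the driver */
	if (maj == SKETCH_API_MAJOR &&
	    min > SKETCH_API_MINOR + SKETCH_API_DIFF)
		return API_WARN_NEWER;	/* warn, keep running */
	if (maj < SKETCH_API_MAJOR ||
	    min < SKETCH_API_MINOR - SKETCH_API_DIFF)
		return API_WARN_OLDER;	/* warn, keep running */
	return API_OK;
}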
+
/**
* ixgbe_check_fw_error - Check firmware for errors
* @adapter: the adapter private structure
@@ -8374,12 +8403,14 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 fwsm;
+ int err;
/* read fwsm.ext_err_ind register and log errors */
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+ /* skip if E610's FW is reloading; a warning in that case may be misleading */
 if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK ||
- !(fwsm & IXGBE_FWSM_FW_VAL_BIT))
+ (!(fwsm & IXGBE_FWSM_FW_VAL_BIT) && hw->mac.type != ixgbe_mac_e610))
e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n",
fwsm);
@@ -8387,10 +8418,53 @@ static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter)
e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
return true;
}
+ if (!(adapter->flags2 & IXGBE_FLAG2_API_MISMATCH)) {
+ err = ixgbe_check_fw_api_mismatch(adapter);
+ if (err)
+ return true;
+ }
+
+ /* return here if FW rollback mode has been already detected */
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ROLLBACK)
+ return false;
+
+ if (hw->mac.ops.fw_rollback_mode && hw->mac.ops.fw_rollback_mode(hw)) {
+ struct ixgbe_nvm_info *nvm_info = &adapter->hw.flash.nvm;
+ char ver_buff[64] = "";
+
+ if (hw->mac.ops.get_fw_ver && hw->mac.ops.get_fw_ver(hw))
+ goto no_version;
+
+ if (hw->mac.ops.get_nvm_ver &&
+ hw->mac.ops.get_nvm_ver(hw, nvm_info))
+ goto no_version;
+
+ snprintf(ver_buff, sizeof(ver_buff),
+ "Current version is NVM:%x.%x.%x, FW:%d.%d. ",
+ nvm_info->major, nvm_info->minor, nvm_info->eetrack,
+ hw->fw_maj_ver, hw->fw_min_ver);
+no_version:
+ e_dev_warn("Firmware rollback mode detected. %sDevice may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode.",
+ ver_buff);
+
+ adapter->flags2 |= IXGBE_FLAG2_FW_ROLLBACK;
+ }
return false;
}
+static void ixgbe_recovery_service_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ service_task);
+
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_service_event_complete(adapter);
+
+ mod_timer(&adapter->service_timer, jiffies + msecs_to_jiffies(100));
+}
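
The recovery task reschedules itself every 100 ms through the service timer that ixgbe_recovery_probe() sets up below. A simplified sketch of the timer side of that loop (the real ixgbe_service_timer also serves the normal, non-recovery path and varies its period):

#include <linux/timer.h>

static void ixgbe_service_timer_sketch(struct timer_list *t)
{
	struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);

	/* queue the service work; the task re-arms the timer when done */
	ixgbe_service_event_schedule(adapter);
}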
+
/**
* ixgbe_service_task - manages and runs subtasks
* @work: pointer to work_struct containing our data
@@ -8410,8 +8484,13 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
if (ixgbe_check_fw_error(adapter)) {
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ if (adapter->mii_bus) {
+ mdiobus_unregister(adapter->mii_bus);
+ adapter->mii_bus = NULL;
+ }
unregister_netdev(adapter->netdev);
+ }
ixgbe_service_event_complete(adapter);
return;
}
@@ -9001,7 +9080,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_FCOE):
case htons(ETH_P_FIP):
- adapter = netdev_priv(dev);
+ adapter = ixgbe_from_netdev(dev);
if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
break;
@@ -9260,7 +9339,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
struct net_device *netdev,
struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_ring *tx_ring;
/*
@@ -9292,7 +9371,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
**/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
@@ -9310,7 +9389,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u16 value;
int rc;
@@ -9336,7 +9415,7 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
u16 addr, u16 value)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (adapter->mii_bus) {
@@ -9356,7 +9435,7 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
switch (cmd) {
case SIOCSHWTSTAMP:
@@ -9382,7 +9461,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
if (is_valid_ether_addr(hw->mac.san_addr)) {
@@ -9406,7 +9485,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
if (is_valid_ether_addr(mac->san_addr)) {
@@ -9437,7 +9516,7 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
static void ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int i;
rcu_read_lock();
@@ -9480,7 +9559,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
struct ifla_vf_stats *vf_stats)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf < 0 || vf >= adapter->num_vfs)
return -EINVAL;
@@ -9597,7 +9676,7 @@ static int ixgbe_reassign_macvlan_pool(struct net_device *vdev,
static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct netdev_nested_priv priv = {
.data = (void *)adapter,
};
@@ -9618,7 +9697,7 @@ static void ixgbe_defrag_macvlan_pools(struct net_device *dev)
*/
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_hw *hw = &adapter->hw;
/* Hardware supports up to 8 traffic classes */
@@ -10176,7 +10255,7 @@ static LIST_HEAD(ixgbe_block_cb_list);
static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (type) {
case TC_SETUP_BLOCK:
@@ -10204,7 +10283,7 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
#endif
void ixgbe_do_reset(struct net_device *netdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -10215,7 +10294,7 @@ void ixgbe_do_reset(struct net_device *netdev)
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
@@ -10252,7 +10331,7 @@ static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter)
static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t features)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
netdev_features_t changed = netdev->features ^ features;
bool need_reset = false;
@@ -10328,7 +10407,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u16 pool = VMDQ_P(0);
if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
@@ -10416,7 +10495,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags,
struct netlink_ext_ack *extack)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct nlattr *attr, *br_spec;
int rem;
@@ -10444,7 +10523,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
u32 filter_mask, int nlflags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return 0;
@@ -10456,7 +10535,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
struct ixgbe_fwd_adapter *accel;
int tcs = adapter->hw_tcs ? : 1;
int pool, err;
@@ -10553,7 +10632,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
struct ixgbe_fwd_adapter *accel = priv;
- struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(pdev);
unsigned int rxbase = accel->rx_base_queue;
unsigned int i;
@@ -10631,7 +10710,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
{
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct bpf_prog *old_prog;
bool need_reset;
int num_queues;
@@ -10703,7 +10782,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -10738,7 +10817,7 @@ void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
int nxmit = 0;
int i;
@@ -11146,7 +11225,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
* format to display. The FW version is taken from the EEPROM/NVM.
*
*/
-static void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
+void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
{
struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
@@ -11197,6 +11276,66 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_recovery_probe - Handle FW recovery mode during probe
+ * @adapter: the adapter private structure
+ *
+ * Perform limited driver initialization when FW error is detected.
+ *
+ * Return: 0 on successful probe for E610, -EIO if recovery mode is detected
+ * on a non-E610 adapter, or another negative error code on any other failure.
+ */
+static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool disable_dev;
+ int err = -EIO;
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ goto clean_up_probe;
+
+ ixgbe_get_hw_control(adapter);
+ mutex_init(&hw->aci.lock);
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto shutdown_aci;
+
+ timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
+ INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task);
+ set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+ pci_set_drvdata(pdev, adapter);
+ /* Create the devlink interface so the NIC can still be managed,
+ * e.g. to load a new NVM image
+ */
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev,
+ &adapter->devlink_port);
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
+
+ return 0;
+shutdown_aci:
+ mutex_destroy(&adapter->hw.aci.lock);
+ ixgbe_release_hw_control(adapter);
+ devlink_free(adapter->devlink);
+clean_up_probe:
+ disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
+ free_netdev(netdev);
+ pci_release_mem_regions(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ixgbe_pci_tbl
@@ -11210,6 +11349,7 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
+ struct ixgbe_netdevice_priv *netdev_priv_wrapper;
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
@@ -11263,7 +11403,13 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
indices = IXGBE_MAX_RSS_INDICES_X550;
}
- netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
+ adapter = ixgbe_allocate_devlink(&pdev->dev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto err_devlink;
+ }
+
+ netdev = alloc_etherdev_mq(sizeof(*netdev_priv_wrapper), indices);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -11271,7 +11417,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
+ netdev_priv_wrapper = netdev_priv(netdev);
+ netdev_priv_wrapper->adapter = adapter;
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -11287,11 +11434,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
- netdev->netdev_ops = &ixgbe_netdev_ops;
- ixgbe_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
-
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
@@ -11321,15 +11463,31 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
+ netdev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbe_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
/* setup the private structure */
err = ixgbe_sw_init(adapter, ii);
if (err)
goto err_sw_init;
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
+ if (ixgbe_check_fw_error(adapter))
+ return ixgbe_recovery_probe(adapter);
+
if (adapter->hw.mac.type == ixgbe_mac_e610) {
err = ixgbe_get_caps(&adapter->hw);
if (err)
dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
+
+ err = ixgbe_get_flash_data(&adapter->hw);
+ if (err)
+ goto err_sw_init;
}
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
@@ -11348,10 +11506,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* Make sure the SWFW semaphore is in a valid state */
- if (hw->mac.ops.init_swfw_sync)
- hw->mac.ops.init_swfw_sync(hw);
-
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -11504,11 +11658,6 @@ skip_sriov:
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
- if (ixgbe_check_fw_error(adapter)) {
- err = -EIO;
- goto err_sw_init;
- }
-
/* make sure the EEPROM is good */
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n");
@@ -11591,7 +11740,7 @@ skip_sriov:
if (expected_gts > 0)
ixgbe_check_minimum_link(adapter, expected_gts);
- err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
+ err = hw->eeprom.ops.read_pba_string(hw, part_str, sizeof(part_str));
if (err)
strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
@@ -11617,6 +11766,11 @@ skip_sriov:
}
strcpy(netdev->name, "eth%d");
pci_set_drvdata(pdev, adapter);
+
+ devl_lock(adapter->devlink);
+ ixgbe_devlink_register_port(adapter);
+ SET_NETDEV_DEVLINK_PORT(adapter->netdev, &adapter->devlink_port);
+
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -11671,11 +11825,16 @@ skip_sriov:
if (err)
goto err_netdev;
+ ixgbe_devlink_init_regions(adapter);
+ devl_register(adapter->devlink);
+ devl_unlock(adapter->devlink);
return 0;
err_netdev:
unregister_netdev(netdev);
err_register:
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
if (hw->mac.type == ixgbe_mac_e610)
@@ -11692,7 +11851,9 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
+err_devlink:
err_pci_reg:
err_dma:
if (!adapter || disable_dev)
@@ -11721,6 +11882,9 @@ static void ixgbe_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
+ devl_lock(adapter->devlink);
+ devl_unregister(adapter->devlink);
+ ixgbe_devlink_destroy_regions(adapter);
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
@@ -11756,6 +11920,10 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ devl_port_unregister(&adapter->devlink_port);
+ devl_unlock(adapter->devlink);
+ devlink_free(adapter->devlink);
+
ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
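The probe/remove hunks above bring ixgbe's devlink setup under the instance lock: the port is registered and attached with SET_NETDEV_DEVLINK_PORT() before register_netdev(), and the devlink instance itself is only published once probe can no longer fail. A minimal sketch of that ordering, assuming a pre-allocated devlink instance and port (ixgbe's own helpers such as ixgbe_devlink_register_port() and the regions setup are not reproduced here):

/* Sketch only: locked devlink bring-up mirroring the error
 * unwinding of the err_register/err_netdev labels above.
 */
static int example_probe_devlink(struct devlink *dl,
				 struct devlink_port *dl_port,
				 struct net_device *netdev)
{
	int err;

	devl_lock(dl);
	err = devl_port_register(dl, dl_port, 0);	/* port index assumed */
	if (err)
		goto out_unlock;
	SET_NETDEV_DEVLINK_PORT(netdev, dl_port);

	err = register_netdev(netdev);
	if (err)
		goto out_port;

	devl_register(dl);	/* publish last, once probe cannot fail */
	devl_unlock(dl);
	return 0;

out_port:
	devl_port_unregister(dl_port);
out_unlock:
	devl_unlock(dl);
	return err;
}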
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 0a03a8bb5f88..2d54828bdfbb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -167,7 +167,7 @@ int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 1;
+ int max_retry = 3;
int retry = 0;
u8 reg_high;
u8 csum;
@@ -2285,7 +2285,7 @@ static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- u32 max_retry = 1;
+ u32 max_retry = 3;
u32 retry = 0;
int status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index ccdce80edd14..0dbbd2befd4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int retval;
if (vf >= adapter->num_vfs)
@@ -1526,7 +1526,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 vlan_proto)
{
int err = 0;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
@@ -1644,7 +1644,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int link_speed;
/* verify VF is active */
@@ -1679,7 +1679,7 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (vf >= adapter->num_vfs)
@@ -1757,7 +1757,7 @@ void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
**/
int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
int ret = 0;
if (vf < 0 || vf >= adapter->num_vfs) {
@@ -1794,7 +1794,7 @@ int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
/* This operation is currently supported only for 82599 and x540
* devices.
@@ -1813,7 +1813,7 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
@@ -1836,7 +1836,7 @@ int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
if (vf >= adapter->num_vfs)
return -EINVAL;
ivi->vf = vf;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 5fdf32d79d82..892fa6c1f879 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3446,6 +3446,8 @@ struct ixgbe_eeprom_operations {
int (*validate_checksum)(struct ixgbe_hw *, u16 *);
int (*update_checksum)(struct ixgbe_hw *);
int (*calc_checksum)(struct ixgbe_hw *);
+ int (*read_pba_string)(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
};
struct ixgbe_mac_operations {
@@ -3454,6 +3456,7 @@ struct ixgbe_mac_operations {
int (*start_hw)(struct ixgbe_hw *);
int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ int (*get_fw_ver)(struct ixgbe_hw *hw);
int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
int (*get_device_caps)(struct ixgbe_hw *, u16 *);
@@ -3522,6 +3525,8 @@ struct ixgbe_mac_operations {
int (*get_thermal_sensor_data)(struct ixgbe_hw *);
int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ int (*get_nvm_ver)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index 617e07878e4f..09df67f03cf4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -10,11 +10,75 @@
#define IXGBE_MAX_VSI 768
/* Checksum and Shadow RAM pointers */
-#define E610_SR_SW_CHECKSUM_WORD 0x3F
+#define IXGBE_E610_SR_NVM_CTRL_WORD 0x00
+#define IXGBE_E610_SR_PBA_BLOCK_PTR 0x16
+#define IXGBE_E610_SR_PBA_BLOCK_MASK GENMASK(15, 8)
+#define IXGBE_E610_SR_NVM_DEV_STARTER_VER 0x18
+#define IXGBE_E610_SR_NVM_EETRACK_LO 0x2D
+#define IXGBE_E610_SR_NVM_EETRACK_HI 0x2E
+#define IXGBE_E610_NVM_VER_LO_MASK GENMASK(7, 0)
+#define IXGBE_E610_NVM_VER_HI_MASK GENMASK(15, 12)
+#define IXGBE_E610_SR_SW_CHECKSUM_WORD 0x3F
+#define IXGBE_E610_SR_PFA_PTR 0x40
+#define IXGBE_E610_SR_1ST_NVM_BANK_PTR 0x42
+#define IXGBE_E610_SR_NVM_BANK_SIZE 0x43
+#define IXGBE_E610_SR_1ST_OROM_BANK_PTR 0x44
+#define IXGBE_E610_SR_OROM_BANK_SIZE 0x45
+#define IXGBE_E610_SR_NETLIST_BANK_PTR 0x46
+#define IXGBE_E610_SR_NETLIST_BANK_SIZE 0x47
+
+/* The OROM version topology */
+#define IXGBE_OROM_VER_PATCH_MASK GENMASK_ULL(7, 0)
+#define IXGBE_OROM_VER_BUILD_MASK GENMASK_ULL(23, 8)
+#define IXGBE_OROM_VER_MASK GENMASK_ULL(31, 24)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+#define IXGBE_HDR_LEN_ROUNDUP 32
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
/* Shadow RAM related */
#define IXGBE_SR_WORDS_IN_1KB 512
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M GENMASK_ULL(9, 0)
+
/* Firmware Status Register (GL_FWSTS) */
#define GL_FWSTS 0x00083048 /* Reset Source: POR */
#define GL_FWSTS_EP_PF0 BIT(24)
@@ -24,11 +88,23 @@
#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5)
+#define IXGBE_GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define IXGBE_GL_MNG_FWSM_RECOVERY_M BIT(1)
+#define IXGBE_GL_MNG_FWSM_ROLLBACK_M BIT(2)
+
/* Flash Access Register */
#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */
#define IXGBE_GLNVM_FLA_LOCKED_S 6
#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6)
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_M GENMASK(7, 6)
+#define IXGBE_SR_CTRL_WORD_VALID BIT(0)
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
/* Admin Command Interface (ACI) registers */
#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4))
#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
@@ -40,6 +116,10 @@
#define IXGBE_PF_HICR_SV BIT(2)
#define IXGBE_PF_HICR_EV BIT(3)
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
#define IXGBE_ACI_DESC_SIZE 32
#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
@@ -143,6 +223,7 @@ enum ixgbe_aci_opc {
ixgbe_aci_opc_write_mdio = 0x06E5,
ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
ixgbe_aci_opc_set_gpio = 0x06EC,
ixgbe_aci_opc_get_gpio = 0x06ED,
ixgbe_aci_opc_sff_eeprom = 0x06EE,
@@ -728,6 +809,18 @@ struct ixgbe_aci_cmd_get_link_topo_pin {
u8 rsvd[7];
};
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 ident_mode;
+ u8 rsvd[13];
+};
+
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+
/* Read/Write SFF EEPROM command (indirect 0x06EE) */
struct ixgbe_aci_cmd_sff_eeprom {
u8 lport_num;
@@ -761,6 +854,8 @@ struct ixgbe_aci_cmd_nvm {
#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
__le16 offset_low;
u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+#define IXGBE_ACI_NVM_OFFSET_HI_A_MASK GENMASK(15, 8)
+#define IXGBE_ACI_NVM_OFFSET_HI_U_MASK GENMASK(23, 16)
u8 cmd_flags;
#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
@@ -776,6 +871,9 @@ struct ixgbe_aci_cmd_nvm {
#define IXGBE_ACI_NVM_PERST_FLAG 1
#define IXGBE_ACI_NVM_EMPR_FLAG 2
#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+#define IXGBE_ACI_NVM_NO_PRESERVATION 0x0
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED 0x6
+
/* For Write Activate, several flags are sent as part of a separate
* flags2 field using a separate byte. For simplicity of the software
* interface, we pass the flags as a 16 bit value so these flags are
@@ -805,6 +903,63 @@ struct ixgbe_aci_cmd_nvm_checksum {
u8 rsvd2[12];
};
+/* Used for NVM Set Package Data command - 0x070A */
+struct ixgbe_aci_cmd_nvm_pkg_data {
+ u8 reserved[3];
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_PKG_DELETE BIT(0) /* used for command call */
+
+ u32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Used for Pass Component Table command - 0x070B */
+struct ixgbe_aci_cmd_nvm_pass_comp_tbl {
+ u8 component_response; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_PARTIAL_CHECK 0x3
+ u8 component_response_code; /* Response only */
+#define IXGBE_ACI_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
+#define IXGBE_ACI_NVM_PASS_COMP_STAMP_LOWER 0x2
+#define IXGBE_ACI_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3
+#define IXGBE_ACI_NVM_PASS_COMP_CONFLICT_CODE 0x4
+#define IXGBE_ACI_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5
+#define IXGBE_ACI_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6
+#define IXGBE_ACI_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7
+#define IXGBE_ACI_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA
+#define IXGBE_ACI_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB
+ u8 reserved;
+ u8 transfer_flag;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ixgbe_aci_cmd_nvm_comp_tbl {
+ __le16 comp_class;
+#define NVM_COMP_CLASS_ALL_FW 0x000A
+
+ __le16 comp_id;
+#define NVM_COMP_ID_OROM 0x5
+#define NVM_COMP_ID_NVM 0x6
+#define NVM_COMP_ID_NETLIST 0x8
+
+ u8 comp_class_idx;
+#define FWU_COMP_CLASS_IDX_NOT_USE 0x0
+
+ __le32 comp_cmp_stamp;
+ u8 cvs_type;
+#define NVM_CVS_TYPE_ASCII 0x1
+
+ u8 cvs_len;
+ u8 cvs[]; /* Component Version String */
+} __packed;
+
/**
* struct ixgbe_aci_desc - Admin Command (AC) descriptor
* @flags: IXGBE_ACI_FLAG_* flags
@@ -843,11 +998,14 @@ struct ixgbe_aci_desc {
struct ixgbe_aci_cmd_restart_an restart_an;
struct ixgbe_aci_cmd_get_link_status get_link_status;
struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
struct ixgbe_aci_cmd_get_link_topo get_link_topo;
struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
struct ixgbe_aci_cmd_nvm nvm;
struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ struct ixgbe_aci_cmd_nvm_pkg_data pkg_data;
+ struct ixgbe_aci_cmd_nvm_pass_comp_tbl pass_comp_tbl;
} params;
};
@@ -984,6 +1142,16 @@ struct ixgbe_hw_caps {
#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
} __packed;
+#define IXGBE_OROM_CIV_SIGNATURE "$CIV"
+
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+};
+
/* Function specific capabilities */
struct ixgbe_hw_func_caps {
u32 num_allocd_vfs; /* Number of allocated VFs */
@@ -1015,6 +1183,11 @@ struct ixgbe_aci_info {
enum ixgbe_aci_err last_status; /* last status of sent admin command */
};
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
/* Option ROM version information */
struct ixgbe_orom_info {
u8 major; /* Major version of OROM */
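Per the structure comments above, a $CIV block is valid when its signature matches and the byte sum of the entire structure is zero modulo 256. A hedged sketch of that check (the helper name is illustrative only):

static bool example_civd_valid(const struct ixgbe_orom_civd_info *civd)
{
	const u8 *p = (const u8 *)civd;
	u8 sum = 0;
	size_t i;

	/* signature must read '$CIV'... */
	if (memcmp(civd->signature, IXGBE_OROM_CIV_SIGNATURE,
		   sizeof(civd->signature)))
		return false;

	/* ...and all structure bytes must sum to 0 (mod 256) */
	for (i = 0; i < sizeof(*civd); i++)
		sum += p[i];

	return sum == 0;
}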
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 1fc821fb351a..f1ab95aa8c83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -894,6 +894,7 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
.validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
+ .read_pba_string = &ixgbe_read_pba_string_generic,
};
static const struct ixgbe_phy_operations phy_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 277ceaf8a793..1d2acdb64f45 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -3959,6 +3959,7 @@ static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
.validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
+ .read_pba_string = &ixgbe_read_pba_string_generic, \
static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
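With X540 and X550 now routing .read_pba_string to the existing generic reader, the probe-time call site shown earlier (hw->eeprom.ops.read_pba_string) works uniformly across MAC generations, presumably so E610 can supply its own flash-backed reader. A sketch of the indirect call, with the buffer size assumed for illustration:

static void example_read_pba(struct ixgbe_hw *hw)
{
	u8 part_str[32];	/* buffer size assumed for illustration */

	if (hw->eeprom.ops.read_pba_string(hw, part_str, sizeof(part_str)))
		strscpy((char *)part_str, "Unknown", sizeof(part_str));

	pr_info("PBA: %s\n", (char *)part_str);
}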
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 3e3b471e53f0..ac58964b2f08 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -508,7 +508,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
struct ixgbe_ring *ring;
if (test_bit(__IXGBE_DOWN, &adapter->state))
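The netdev_priv() to ixgbe_from_netdev() conversions here and in ixgbe_sriov.c funnel adapter lookup through a single accessor. The helper itself is not part of this diff; as an assumption, it is likely a thin wrapper along these lines, giving one place to change if the private-data layout ever moves (for example, once the devlink port owns the netdev):

/* Assumed shape of the accessor -- not shown in this diff */
static inline struct ixgbe_adapter *example_from_netdev(struct net_device *netdev)
{
	return netdev_priv(netdev);
}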
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 8ba037e3d9c2..65580b9cb06f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -201,6 +201,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
/**
* ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
+ * @dev: pointer to net device to program
* @xs: pointer to xfrm_state struct
* @mykey: pointer to key array to populate
* @mysalt: pointer to salt value to populate
@@ -208,10 +209,10 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
* This copies the protocol keys and salt to our own data tables. The
* 82599 family only supports the one algorithm.
**/
-static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int ixgbevf_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -256,13 +257,14 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_add_sa - program device with a security association
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
* @extack: extack point to fill failure reason
**/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+static int ixgbevf_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
@@ -310,7 +312,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
rsa.decrypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, rsa.key,
+ &rsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
@@ -363,7 +366,8 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
if (xs->id.proto & IPPROTO_ESP)
tsa.encrypt = xs->ealg || xs->aead;
- ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
+ ret = ixgbevf_ipsec_parse_proto_keys(dev, xs, tsa.key,
+ &tsa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
@@ -388,11 +392,12 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_del_sa - clear out this specific SA
+ * @dev: pointer to net device to program
* @xs: pointer to transformer state struct
**/
-static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
+static void ixgbevf_ipsec_del_sa(struct net_device *dev,
+ struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
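These signature changes track the xfrm core now passing the offloading net_device into the state callbacks, so drivers stop dereferencing xs->xso.real_dev themselves. A hedged sketch of how such callbacks are wired up (the struct xfrmdev_ops field names are the real ones; referencing the static functions above is for illustration only):

static const struct xfrmdev_ops example_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbevf_ipsec_add_sa,	/* now takes dev */
	.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,	/* now takes dev */
};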
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 837295fecd17..50f7c59e8a04 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -34,7 +34,6 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
depends on HAS_IOMEM
- select MDIO_DEVRES
select PHYLIB
help
This driver supports the MDIO interface found in the network
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 416a926a8281..a7872d14a49d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5173,38 +5173,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = dev->stats.tx_dropped;
}
-static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct mvpp2_port *port = netdev_priv(dev);
void __iomem *ptp;
u32 gcr, int_mask;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
int_mask = gcr = 0;
- if (config.tx_type != HWTSTAMP_TX_OFF) {
+ if (config->tx_type != HWTSTAMP_TX_OFF) {
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
MVPP22_PTP_INT_MASK_QUEUE0;
}
/* It seems we must also release the TX reset when enabling the TSU */
- if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE)
gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET;
if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
mvpp22_tai_start(port->priv->tai);
- if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
mvpp2_modify(ptp + MVPP22_PTP_GCR,
MVPP22_PTP_GCR_RX_RESET |
MVPP22_PTP_GCR_TX_RESET |
@@ -5225,26 +5227,22 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
mvpp22_tai_stop(port->priv->tai);
- port->tx_hwtstamp_type = config.tx_type;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ port->tx_hwtstamp_type = config->tx_type;
return 0;
}
-static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+static int mvpp2_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
-
- memset(&config, 0, sizeof(config));
+ struct mvpp2_port *port = netdev_priv(dev);
- config.tx_type = port->tx_hwtstamp_type;
- config.rx_filter = port->rx_hwtstamp ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ config->tx_type = port->tx_hwtstamp_type;
+ config->rx_filter = port->rx_hwtstamp ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
return 0;
}
@@ -5274,18 +5272,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvpp2_port *port = netdev_priv(dev);
- switch (cmd) {
- case SIOCSHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_set_ts_config(port, ifr);
- break;
-
- case SIOCGHWTSTAMP:
- if (port->hwtstamp)
- return mvpp2_get_ts_config(port, ifr);
- break;
- }
-
if (!port->phylink)
return -ENOTSUPP;
@@ -5799,6 +5785,8 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_set_features = mvpp2_set_features,
.ndo_bpf = mvpp2_xdp,
.ndo_xdp_xmit = mvpp2_xdp_xmit,
+ .ndo_hwtstamp_get = mvpp2_hwtstamp_get,
+ .ndo_hwtstamp_set = mvpp2_hwtstamp_set,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
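After this conversion, user space is unaffected: the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls are translated by the core into the new ndo_hwtstamp_set/get callbacks. A minimal user-space sketch of the GET side:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Returns 0 on success and fills *cfg with the current settings */
static int example_get_hwtstamp(int sock, const char *ifname,
				struct hwtstamp_config *cfg)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	memset(cfg, 0, sizeof(*cfg));
	ifr.ifr_data = (char *)cfg;

	return ioctl(sock, SIOCGHWTSTAMP, &ifr);
}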
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index ccb69bc5c952..420c3f4cf741 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -18,8 +18,6 @@
#include "octep_vf_config.h"
#include "octep_vf_main.h"
-struct workqueue_struct *octep_vf_wq;
-
/* Supported Devices */
static const struct pci_device_id octep_vf_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
index 1a352f41f823..b9f13506f462 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -320,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
-extern struct workqueue_struct *octep_vf_wq;
-
int octep_vf_device_setup(struct octep_vf_device *oct);
int octep_vf_setup_iqs(struct octep_vf_device *oct);
void octep_vf_free_iqs(struct octep_vf_device *oct);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 0b27a695008b..971993586fb4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -717,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 1e5aa5397504..7d21905deed8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -188,14 +188,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
- struct device *sender = &mbox->pdev->dev;
while (!time_after(jiffies, timeout)) {
if (mdev->num_msgs == mdev->msgs_acked)
return 0;
usleep_range(800, 1000);
}
- dev_dbg(sender, "timed out while waiting for rsp\n");
+ trace_otx2_msg_wait_rsp(mbox->pdev);
return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
@@ -219,6 +218,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ struct mbox_msghdr *msg;
u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
@@ -251,7 +251,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
- trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+ msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size,
+ msg->id, msg->pcifunc);
spin_unlock(&mdev->mbox_lock);
@@ -445,6 +448,14 @@ const char *otx2_mbox_id2name(u16 id)
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CGX_MESSAGES
+#undef M
+
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
+ MBOX_UP_CPT_MESSAGES
+#undef M
default:
return "INVALID ID";
}
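otx2_mbox_id2name() is generated with an X-macro: each message table expands M() into a switch case, and the hunk above extends the expansion to the MBOX_UP_CGX_MESSAGES and MBOX_UP_CPT_MESSAGES lists so those IDs resolve to names as well. A self-contained sketch of the pattern, using a made-up message table:

#define EXAMPLE_MESSAGES					\
M(EXAMPLE_READY,  0x001, example_ready,  msg_req, msg_rsp)	\
M(EXAMPLE_ATTACH, 0x002, example_attach, msg_req, msg_rsp)

static const char *example_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return #_name;
	EXAMPLE_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}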
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 005ca8a056c0..a213b2663583 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -524,6 +524,8 @@ struct get_hw_cap_rsp {
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
u8 npc_hash_extract; /* Is hash extract supported */
+#define HW_CAP_MACSEC BIT_ULL(1)
+ u64 hw_caps;
};
/* CGX mbox message formats */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index 655dd4726d36..0277d226293e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
mutex_unlock(&rvu->mbox_lock);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 6575c422635b..a8025f0486c9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1393,8 +1393,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
- if (blktype == BLKTYPE_NIX)
- rvu_nix_reset_mac(pfvf, pcifunc);
block = &hw->block[blkaddr];
@@ -1407,6 +1405,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (lf < 0) /* This should never happen */
continue;
+ if (blktype == BLKTYPE_NIX) {
+ rvu_nix_reset_mac(pfvf, pcifunc);
+ rvu_npc_clear_ucast_entry(rvu, pcifunc, lf);
+ }
/* Disable the LF */
rvu_write64(rvu, blkaddr, block->lfcfg_reg |
(lf << block->lfshift), 0x00ULL);
@@ -2031,6 +2033,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_shaping = hw->cap.nix_shaping;
rsp->npc_hash_extract = hw->cap.npc_hash_extract;
+ if (rvu->mcs_blk_cnt)
+ rsp->hw_caps = HW_CAP_MACSEC;
+
return 0;
}
@@ -2173,7 +2178,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
- trace_otx2_msg_process(mbox->pdev, _id, err); \
+ trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 60f085b00a8c..48f66292ad5c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -969,8 +969,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable);
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -996,6 +994,8 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf);
+
bool is_npc_intf_tx(u8 intf);
bool is_npc_intf_rx(u8 intf);
bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 992fa0b82e8d..d0331b0e0bfd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \
return req; \
}
@@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
+
mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7fa98aeb3663..4a3370a40dd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -13,19 +13,26 @@
/* RVU LMTST */
#define LMT_TBL_OP_READ 0
#define LMT_TBL_OP_WRITE 1
-#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
+#define LMT_MAX_VFS 256
+
+#define LMT_MAP_ENTRY_ENA BIT_ULL(20)
+#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16)
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
- u64 tbl_base;
+ u64 tbl_base, cfg;
+ int pfs, vfs;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ vfs = 1 << (cfg & 0xF);
+ pfs = 1 << ((cfg >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
return -ENOMEM;
@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));
+
+ cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
+ /* 2048 LMTLINES */
+ cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
+
+ writeq(cfg, (lmt_map_base + (index + 8)));
+
/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
* changes effective. Write 1 for flush and read is being used as a
* barrier and sets up a data dependency. Write to 0 after a write
@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
- return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
- pf = rvu_get_pf(pcifunc) & 0x1F;
+ pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
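The rvu_cn10k.c hunks above stop mapping a fixed 128 KiB window and instead size the LMT map table from APR_AF_LMT_CFG, while indexing assumes a fixed LMT_MAX_VFS slots per PF. A sketch of the sizing arithmetic as decoded above (bits [3:0] hold log2 of VFs per PF, bits [6:4] log2 of PFs):

static inline size_t example_lmt_map_size(u64 cfg)
{
	int vfs = 1 << (cfg & 0xF);		/* 2^cfg[3:0] VFs per PF */
	int pfs = 1 << ((cfg >> 4) & 0x7);	/* 2^cfg[6:4] PFs */

	return (size_t)pfs * vfs * 16;		/* LMT_MAPTBL_ENTRY_SIZE */
}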
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a1f9ec03c2ce..c827da626471 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
u64 lmt_addr, val, tbl_base;
int pf, vf, num_vfs, hw_vfs;
void __iomem *lmt_map_base;
+ int apr_pfs, apr_vfs;
int buf_size = 10240;
size_t off = 0;
int index = 0;
@@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ apr_vfs = 1 << (val & 0xF);
+ apr_pfs = 1 << ((val >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
+ LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
kfree(buf);
@@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
pf);
- index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
(tbl_base + index));
lmt_addr = readq(lmt_map_base + index);
@@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
/* Reading num of VFs per PF */
rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
for (vf = 0; vf < num_vfs; vf++) {
- index = (pf * rvu->hw->total_vfs * 16) +
+ index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
off += scnprintf(&buf[off], buf_size - 1 - off,
"PF%d:VF%d \t\t", pf, vf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 821fe242f821..da15bb451178 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -820,24 +820,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
- bool enable)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
- return;
-
- /* Get 'pcifunc' of PF device */
- pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
-
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
- NIXLF_BCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
-}
-
void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan)
{
@@ -1125,6 +1107,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
int index, blkaddr;
@@ -1133,9 +1116,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
return;
/* Ucast MCAM match entry of this PF/VF */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_UCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC),
+ pfvf->nix_rx_intf)) {
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ }
/* Nothing to do for VFs, on platforms where pkt replication
* is not supported
@@ -3588,3 +3574,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
return 0;
}
+
+void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int ucast_idx, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false);
+
+ npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0);
+
+ npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == ucast_idx) {
+ list_del(&rule->list);
+ kfree(rule);
+ break;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
index 052ae5923e3a..32953cca108c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -60,6 +60,8 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+ otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+
mutex_unlock(&rvu->mbox_lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
index 775fd4c35794..19e0d16b12f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -11,3 +11,5 @@
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status);
+EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index 5704520f9b02..4cd0fc4b0d20 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -18,33 +18,42 @@
#include "mbox.h"
TRACE_EVENT(otx2_msg_alloc,
- TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
- TP_ARGS(pdev, id, size),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc),
+ TP_ARGS(pdev, id, size, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(u64, size)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->size = size;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->size)
+ TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id), __entry->size,
+ __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_send,
- TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
- TP_ARGS(pdev, num_msgs, msg_size),
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size,
+ u16 id, u16 pcifunc),
+ TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, num_msgs)
__field(u64, msg_size)
+ __field(u16, id)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->num_msgs = num_msgs;
__entry->msg_size = msg_size;
+ __entry->id = id;
+ __entry->pcifunc = pcifunc;
),
- TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
- __entry->num_msgs, __entry->msg_size)
+ TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n",
+ __get_str(dev), __entry->num_msgs, __entry->msg_size,
+ otx2_mbox_id2name(__entry->id), __entry->pcifunc)
);
TRACE_EVENT(otx2_msg_check,
@@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt,
);
TRACE_EVENT(otx2_msg_process,
- TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
- TP_ARGS(pdev, id, err),
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc),
+ TP_ARGS(pdev, id, err, pcifunc),
TP_STRUCT__entry(__string(dev, pci_name(pdev))
__field(u16, id)
__field(int, err)
+ __field(u16, pcifunc)
),
TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->err = err;
+ __entry->pcifunc = pcifunc;
+ ),
+ TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev),
+ otx2_mbox_id2name(__entry->id),
+ __entry->err, __entry->pcifunc)
+);
+
+TRACE_EVENT(otx2_msg_wait_rsp,
+ TP_PROTO(const struct pci_dev *pdev),
+ TP_ARGS(pdev),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ ),
+ TP_fast_assign(__assign_str(dev)
+ ),
+ TP_printk("[%s] timed out while waiting for response\n",
+ __get_str(dev))
+);
+
+TRACE_EVENT(otx2_msg_status,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs),
+ TP_ARGS(pdev, msg, num_msgs),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u16, num_msgs)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->num_msgs = num_msgs;
+ ),
+ TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev),
+ __get_str(str), __entry->num_msgs)
+);
+
+TRACE_EVENT(otx2_parse_dump,
+ TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word),
+ TP_ARGS(pdev, msg, word),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, w0)
+ __field(u64, w1)
+ __field(u64, w2)
+ __field(u64, w3)
+ __field(u64, w4)
+ __field(u64, w5)
+ ),
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
+ __entry->w0 = *(word + 0);
+ __entry->w1 = *(word + 1);
+ __entry->w2 = *(word + 2);
+ __entry->w3 = *(word + 3);
+ __entry->w4 = *(word + 4);
+ __entry->w5 = *(word + 5);
),
- TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev),
- otx2_mbox_id2name(__entry->id), __entry->err)
+ TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n",
+ __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2,
+ __entry->w3, __entry->w4, __entry->w5)
);
#endif /* __RVU_TRACE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index c3b6e0f60a79..7f6a435ac680 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -357,9 +357,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
mutex_lock(&pfvf->mbox.lock);
/* Remove RQ's policer mapping */
- for (qidx = 0; qidx < hw->rx_queues; qidx++)
- cn10k_map_unmap_rq_policer(pfvf, qidx,
- hw->matchall_ipolicer, false);
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false);
+ if (rc)
+ dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).",
+ qidx, rc);
+ }
rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index fc59e50bafce..a6500e3673f2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -663,10 +663,10 @@ static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
return -EOPNOTSUPP;
}
-static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
+static int cn10k_ipsec_outb_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.dev;
struct cn10k_tx_sa_s *sa_entry;
struct qmem *sa_info;
struct otx2_nic *pf;
@@ -676,7 +676,7 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
if (err)
return err;
- pf = netdev_priv(netdev);
+ pf = netdev_priv(dev);
err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
if (err)
@@ -700,18 +700,18 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
return 0;
}
-static int cn10k_ipsec_add_state(struct xfrm_state *x,
+static int cn10k_ipsec_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
return cn10k_ipsec_inb_add_state(x, extack);
else
- return cn10k_ipsec_outb_add_state(x, extack);
+ return cn10k_ipsec_outb_add_state(dev, x, extack);
}
-static void cn10k_ipsec_del_state(struct xfrm_state *x)
+static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x)
{
- struct net_device *netdev = x->xso.dev;
struct cn10k_tx_sa_s *sa_entry;
struct qmem *sa_info;
struct otx2_nic *pf;
@@ -720,7 +720,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x)
if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
return;
- pf = netdev_priv(netdev);
+ pf = netdev_priv(dev);
sa_info = (struct qmem *)x->xso.offload_handle;
sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
@@ -732,7 +732,7 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x)
err = cn10k_outb_write_sa(pf, sa_info);
if (err)
- netdev_err(netdev, "Error (%d) deleting SA\n", err);
+ netdev_err(dev, "Error (%d) deleting SA\n", err);
x->xso.offload_handle = 0;
qmem_free(pf->dev, sa_info);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index f3b9daffaec3..4c7e0f345cb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -531,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
if (sw_tx_sc->encrypt)
sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
- policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
+ pfvf->netdev->mtu + OTX2_ETH_HLEN);
/* Write SecTag excluding AN bits(1..0) */
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 84cd029a85aa..6f572589f1e5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -2055,6 +2055,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t
}
EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct otx2_hw *hw = &pfvf->hw;
+ struct get_hw_cap_rsp *rsp;
+ struct msg_req *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_get_hw_cap(mbox);
+ if (!req)
+ goto fail;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (rsp->hw_caps & HW_CAP_MACSEC)
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
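otx2_set_hw_capabilities() above queries the AF for hardware capabilities over the mailbox instead of inferring MACsec support from the PCI device ID (see the otx2_common.h hunk below, which drops the is_dev_cn10kb() check). Consumers keep testing the same cap_flag bit; a sketch:

static inline bool example_has_macsec(struct otx2_nic *pfvf)
{
	return test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag);
}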
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 1e88422825be..ca0e6ab12ceb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -356,6 +356,7 @@ struct otx2_flow_config {
struct list_head flow_list_tc;
u8 ucast_flt_cnt;
bool ntuple;
+ u16 ntuple_cnt;
};
struct dev_hw_ops {
@@ -631,9 +632,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
}
-
- if (is_dev_cn10kb(pfvf->pdev))
- __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -871,6 +869,7 @@ static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
{ \
struct _req_type *req; \
+ u16 pcifunc = mbox->pfvf->pcifunc; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&mbox->mbox, 0, sizeof(struct _req_type), \
@@ -879,7 +878,8 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
- trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
+ req->hdr.pcifunc = pcifunc; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
return req; \
}
@@ -1043,6 +1043,7 @@ void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1107,6 +1108,8 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
u64 iova, int len, u16 qidx, u16 flags);
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 33ec9a7f7c03..e13ae5484c19 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
if (!pfvf->flow_cfg)
return 0;
+ pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 010385b29988..45b8c9230184 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -315,7 +315,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_pause_frm_cfg *req, *rsp;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return;
mutex_lock(&pfvf->mbox.lock);
@@ -347,7 +347,7 @@ static int otx2_set_pauseparam(struct net_device *netdev,
if (pause->autoneg)
return -EOPNOTSUPP;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return -EOPNOTSUPP;
if (pause->rx_pause)
@@ -941,8 +941,8 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- /* LBK link is internal and always UP */
- if (is_otx2_lbkvf(pfvf->pdev))
+ /* LBK and SDP links are internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 1;
return pfvf->linfo.link_up;
}
@@ -1413,7 +1413,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- if (is_otx2_lbkvf(pfvf->pdev)) {
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) {
cmd->base.duplex = DUPLEX_FULL;
cmd->base.speed = SPEED_100000;
} else {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 47bfd1fb37d4..64c6d9162ef6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
- count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
@@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+ pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index cfed9ec5b157..db7c466fdc39 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -465,6 +465,9 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)",
+ vf_mbox->num_msgs);
+
for (id = 0; id < vf_mbox->num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
offset);
@@ -473,7 +476,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
goto inval_msg;
/* Set VF's number in each of the msg */
- msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc &= ~RVU_PFVF_FUNC_MASK;
msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
offset = msg->next_msgoff;
}
@@ -503,6 +506,9 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)",
+ vf_mbox->up_num_msgs);
+
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
msg = mdev->mbase + offset;
@@ -819,6 +825,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
+ trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
@@ -974,6 +983,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)",
+ num_msgs);
+
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
@@ -1023,6 +1035,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)",
+ hdr->num_msgs);
}
if (mbox_data & MBOX_DOWN_MSG) {
@@ -1039,6 +1054,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
BIT_ULL(0));
+
+ trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)",
+ hdr->num_msgs);
}
return IRQ_HANDLED;
@@ -3048,7 +3066,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -3057,7 +3075,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -3067,10 +3085,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -3128,6 +3144,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ otx2_set_hw_capabilities(pf);
+
err = cn10k_mcs_init(pf);
if (err)
goto err_del_mcam_entries;
@@ -3246,8 +3264,6 @@ err_detach_rsrc:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -3289,6 +3305,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req = (struct cgx_link_info_msg *)msghdr;
req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = pf->pcifunc;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
@@ -3447,8 +3464,6 @@ static void otx2_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pf->pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2_pf_driver = {
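
Note: the pcifunc change in otx2_pfvf_mbox_handler() is the functional fix in this file. The old code AND-ed with RVU_PFVF_FUNC_MASK, which *keeps* only the function bits; a read-modify-write must AND with the complement to clear them before OR-ing in the VF number, otherwise the PF bits above the mask are lost. A host-checkable sketch with a stand-in 10-bit mask (the real RVU_PFVF_FUNC_MASK value is not shown in this diff):

    #include <stdint.h>
    #include <stdio.h>

    #define FUNC_MASK 0x3ffu                 /* stand-in for RVU_PFVF_FUNC_MASK */

    static uint16_t set_func_bits(uint16_t pcifunc, uint16_t vf_idx)
    {
            pcifunc &= ~FUNC_MASK;                 /* clear old function bits */
            pcifunc |= (vf_idx + 1) & FUNC_MASK;   /* install the VF number   */
            return pcifunc;
    }

    int main(void)
    {
            /* correct: 0x0403 -> 0x0406 (PF bits in 0x0400 preserved);
             * the old "&= FUNC_MASK" variant would yield 0x0007 instead,
             * dropping the PF part of pcifunc entirely.
             */
            printf("0x%04x\n", set_func_bits(0x0403, 5));
            return 0;
    }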
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 0a6bb346ba45..99ace381cc78 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -335,6 +335,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ u64 *word = (u64 *)parse;
void *end, *start;
u32 metasize = 0;
u64 *seg_addr;
@@ -342,9 +343,12 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
int seg;
if (unlikely(parse->errlev || parse->errcode)) {
- if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
+ if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) {
+ trace_otx2_parse_dump(pfvf->pdev, "Err:", word);
return;
+ }
}
+ trace_otx2_parse_dump(pfvf->pdev, "", word);
if (pfvf->xdp_prog)
if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
@@ -1410,9 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq,
- struct xdp_frame *xdpf,
- u64 dma_addr, int len, int *offset, u16 flags)
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 7ef3ba477d49..8a8b598bd389 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -136,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
- rsp->hdr.pcifunc = 0;
+ rsp->hdr.pcifunc = req->pcifunc;
rsp->hdr.rc = 0;
err = otx2_mbox_up_handler_cgx_link_event(
vf, (struct cgx_link_info_msg *)req, rsp);
@@ -548,7 +548,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -557,7 +557,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ return err;
}
pci_set_master(pdev);
@@ -565,10 +565,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qcount = num_online_cpus();
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
- if (!netdev) {
- err = -ENOMEM;
- goto err_release_regions;
- }
+ if (!netdev)
+ return -ENOMEM;
pci_set_drvdata(pdev, netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -729,9 +727,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
#ifdef CONFIG_DCB
- err = otx2_dcbnl_set_ops(netdev);
- if (err)
- goto err_free_zc_bmap;
+ /* Priority flow control is not supported for LBK and SDP vf(s) */
+ if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_free_zc_bmap;
+ }
#endif
otx2_qos_init(vf, qos_txqs);
@@ -765,8 +766,6 @@ err_free_irq_vectors:
err_free_netdev:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-err_release_regions:
- pci_release_regions(pdev);
return err;
}
@@ -815,8 +814,6 @@ static void otx2vf_remove(struct pci_dev *pdev)
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
-
- pci_release_regions(pdev);
}
static struct pci_driver otx2vf_driver = {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
index ce10caea8511..b328aae23d73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -11,6 +11,7 @@
#include <net/xdp.h>
#include "otx2_common.h"
+#include "otx2_struct.h"
#include "otx2_xsk.h"
int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
@@ -196,11 +197,39 @@ void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int
sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
}
+static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
+ u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset;
+
+ sq = &pfvf->qset.sq[qidx];
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+}
+
void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
int queue, int budget)
{
struct xdp_desc *xdp_desc = pool->tx_descs;
- int err, i, work_done = 0, batch;
+ int i, batch;
budget = min(budget, otx2_read_free_sqe(pfvf, queue));
batch = xsk_tx_peek_release_desc_batch(pool, budget);
@@ -211,15 +240,6 @@ void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
dma_addr_t dma_addr;
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
- err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len,
- queue, OTX2_AF_XDP_FRAME);
- if (!err) {
- netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet err%d\n", err);
- break;
- }
- work_done++;
+ otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue);
}
-
- if (work_done)
- xsk_tx_release(pool);
}
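
Note: otx2_xsk_sq_append_pkt() above hand-rolls a two-subdescriptor SQE (header plus one scatter/gather entry) and encodes the SQE length in sizem1, which the NIX hardware reads as "number of 16-byte chunks, minus one". Assuming the single-segment layout shown — a 16-byte nix_sqe_hdr_s followed by an 8-byte SG subheader and one 64-bit IOVA — the arithmetic works out as in this host-checkable sketch:

    #include <stdio.h>

    int main(void)
    {
            int hdr = 16;               /* struct nix_sqe_hdr_s: two u64 words */
            int sg  = 8 + 8;            /* SG subheader + one 64-bit IOVA      */
            int offset = hdr + sg;      /* bytes consumed in the SQE           */

            /* hardware field: SQE length in 16-byte units, minus one */
            printf("sizem1 = %d\n", (offset / 16) - 1);   /* -> 1 */
            return 0;
    }

The dropped xsk_tx_release() is consistent with using xsk_tx_peek_release_desc_batch(), which hands back a batch of descriptors and releases them as part of the same call.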
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 35acc07bd964..5765bac119f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -1638,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
if (!node->is_static)
dwrr_del_node = true;
+ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
/* destroy the leaf node */
otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
@@ -1682,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
}
kfree(new_cfg);
- /* update tx_real_queues */
- otx2_qos_update_tx_netdev_queues(pfvf);
-
return 0;
}
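
Note: the qos.c hunk publishes OTX2_QOS_QID_INNER into node->qid with WRITE_ONCE() before the SQ is torn down, so a concurrent reader that samples qid with READ_ONCE() either sees the still-valid queue id or the INNER sentinel, never a torn or stale-cached value. A minimal writer/reader pairing sketch, with a placeholder struct and sentinel:

    struct qos_node_sketch {
            u16 qid;
    };

    #define QOS_QID_INNER 0xFFFFU            /* stand-in sentinel */

    /* teardown path (writer) */
    static void leaf_del_sketch(struct qos_node_sketch *node)
    {
            WRITE_ONCE(node->qid, QOS_QID_INNER);   /* publish before destroy */
            /* ... disable the SQ, destroy the node ... */
    }

    /* lookup path (reader) */
    static bool node_is_leaf_sketch(const struct qos_node_sketch *node)
    {
            return READ_ONCE(node->qid) != QOS_QID_INNER;
    }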
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index c5dbae0e513b..58d572ce08ef 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -256,6 +256,26 @@ out:
return err;
}
+static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
+{
+ struct ndc_sync_op *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->nix_lf_tx_sync = true;
+ req->npa_lf_sync = true;
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -285,6 +305,8 @@ void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
otx2_qos_sqb_flush(pfvf, sq_idx);
otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
+ /* NIX/NPA NDC sync */
+ otx2_qos_nix_npa_ndc_sync(pfvf);
otx2_cleanup_tx_cqes(pfvf, cq);
mutex_lock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 7153a71dfc86..2cd3da3b6843 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -765,7 +765,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pci_request_regions(pdev, DRV_NAME);
+ err = pcim_request_all_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
@@ -774,7 +774,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
- goto err_release_regions;
+ goto err_set_drv_data;
}
pci_set_master(pdev);
@@ -782,7 +782,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
err = -ENOMEM;
- goto err_release_regions;
+ goto err_set_drv_data;
}
pci_set_drvdata(pdev, priv);
@@ -799,7 +799,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = otx2_init_rsrc(pdev, priv);
if (err)
- goto err_release_regions;
+ goto err_set_drv_data;
priv->iommu_domain = iommu_get_domain_for_dev(dev);
@@ -822,9 +822,8 @@ err_detach_rsrc:
otx2_disable_mbox_intr(priv);
otx2_pfaf_mbox_destroy(priv);
pci_free_irq_vectors(pdev);
-err_release_regions:
+err_set_drv_data:
pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
return err;
}
@@ -844,7 +843,6 @@ static void rvu_rep_remove(struct pci_dev *pdev)
otx2_pfaf_mbox_destroy(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
}
static struct pci_driver rvu_rep_driver = {
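
Note: rep.c, otx2_pf.c, otx2_vf.c, and prestera_pci.c all convert from pci_request_regions() to the device-managed pcim_request_all_regions(), which releases the regions automatically on failed probe and on driver detach. That is why every err_release_regions unwind label and the pci_release_regions() calls in the remove paths disappear; rep.c keeps a label only to clear drvdata. A minimal probe sketch of the managed pattern, assuming a managed enable is in place (as the direct error returns in these diffs imply):

    static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int err;

            err = pcim_enable_device(pdev);            /* managed enable */
            if (err)
                    return err;

            err = pcim_request_all_regions(pdev, "sketch-drv");
            if (err)
                    return err;                        /* nothing to unwind */

            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
            if (err)
                    return err;                        /* regions auto-released */

            pci_set_master(pdev);
            return 0;                                  /* no release in remove() */
    }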
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
index 4cd53a2dae46..634f4543c1d7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
static void prestera_counter_stats_work(struct work_struct *work)
{
- struct delayed_work *dl_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *dl_work = to_delayed_work(work);
struct prestera_counter *counter =
container_of(dl_work, struct prestera_counter, stats_dw);
struct prestera_counter_block *block;
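
Note: this hunk (and the mlx5 cmd.c hunk further down) replaces an open-coded container_of() with the to_delayed_work() helper, which is the same container_of wrapped in an inline function in <linux/workqueue.h>:

    static inline struct delayed_work *to_delayed_work(struct work_struct *work)
    {
            return container_of(work, struct delayed_work, work);
    }

    /* typical use in a work handler */
    static void stats_work_sketch(struct work_struct *work)
    {
            struct delayed_work *dw = to_delayed_work(work);
            /* then container_of(dw, struct your_type, your_dwork_member) */
    }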
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 35857dc19542..c45d108b2f6d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_pci_enable_device;
}
- err = pci_request_regions(pdev, driver_name);
+ err = pcim_request_all_regions(pdev, driver_name);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
goto err_pci_request_regions;
}
@@ -938,7 +938,6 @@ err_pci_dev_alloc:
err_pp_ioremap:
err_mem_ioremap:
err_dma_mask:
- pci_release_regions(pdev);
err_pci_request_regions:
err_pci_enable_device:
return err;
@@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
destroy_workqueue(fw->wq);
prestera_fw_uninit(fw);
- pci_release_regions(pdev);
}
static const struct pci_device_id prestera_pci_devices[] = {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 7c27a19c4d8f..b4c01e2878f6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -14,7 +14,7 @@
struct mtk_eth_muxc {
const char *name;
- int cap_bit;
+ u64 cap_bit;
int (*set_path)(struct mtk_eth *eth, u64 path);
};
@@ -31,6 +31,8 @@ static const char *mtk_eth_path_name(u64 path)
return "gmac2_rgmii";
case MTK_ETH_PATH_GMAC2_SGMII:
return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_2P5GPHY:
+ return "gmac2_2p5gphy";
case MTK_ETH_PATH_GMAC2_GEPHY:
return "gmac2_gephy";
case MTK_ETH_PATH_GDM1_ESW:
@@ -127,6 +129,29 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
return 0;
}
+static int set_mux_gmac2_to_2p5gphy(struct mtk_eth *eth, u64 path)
+{
+ int ret;
+
+ if (path == MTK_ETH_PATH_GMAC2_2P5GPHY) {
+ ret = regmap_clear_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_GMAC2_V2);
+ if (ret)
+ return ret;
+
+ /* Setup mux to 2p5g PHY */
+ ret = regmap_clear_bits(eth->infra, TOP_MISC_NETSYS_PCS_MUX,
+ MUX_G2_USXGMII_SEL);
+ if (ret)
+ return ret;
+
+ dev_dbg(eth->dev, "path %s in %s updated\n",
+ mtk_eth_path_name(path), __func__);
+ }
+
+ return 0;
+}
+
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
@@ -210,6 +235,10 @@ static const struct mtk_eth_muxc mtk_eth_muxc[] = {
.cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
.set_path = set_mux_u3_gmac2_to_qphy,
}, {
+ .name = "mux_gmac2_to_2p5gphy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_TO_2P5GPHY,
+ .set_path = set_mux_gmac2_to_2p5gphy,
+ }, {
.name = "mux_gmac1_gmac2_to_sgmii_rgmii",
.cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
.set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
@@ -260,6 +289,20 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
return mtk_eth_mux_setup(eth, path);
}
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ u64 path = 0;
+
+ if (mac_id == MTK_GMAC2_ID)
+ path = MTK_ETH_PATH_GMAC2_2P5GPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ return mtk_eth_mux_setup(eth, path);
+}
+
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
u64 path = 0;
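
Note: set_mux_gmac2_to_2p5gphy() above is built from regmap_clear_bits(), so each mux step is an atomic read-modify-write on the syscon. Behaviourally, regmap_clear_bits(map, reg, bits) is equivalent to regmap_update_bits(map, reg, bits, 0) and regmap_set_bits() to regmap_update_bits(map, reg, bits, bits); the real helpers in <linux/regmap.h> route through regmap_update_bits_base(). A sketch of the 2.5G-PHY mux select expressed in update_bits terms (function name is a placeholder):

    static int mux_sel_2p5gphy_sketch(struct regmap *infra, unsigned int reg,
                                      unsigned int sel_bit)
    {
            /* clear the USXGMII select bit: PCS mux steers to the 2.5G PHY */
            return regmap_update_bits(infra, reg, sel_bit, 0);
    }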
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 22a532695fb0..b38e4f2de674 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -503,7 +503,7 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
/* Force Port1 XGMAC Link Up */
- mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
+ mtk_m32(eth, 0, MTK_XGMAC_FORCE_MODE(MTK_GMAC1_ID),
MTK_XGMAC_STS(MTK_GMAC1_ID));
/* Adjust GSW bridge IPG to 11 */
@@ -532,6 +532,26 @@ static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
return NULL;
}
+static int mtk_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ if (mtk_interface_mode_is_xgmii(eth, iface) &&
+ mac->id != MTK_GMAC1_ID) {
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE,
+ XMAC_MCR_TRX_DISABLE, MTK_XMAC_MCR(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_MODE(mac->id) |
+ MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_MODE(mac->id), MTK_XGMAC_STS(mac->id));
+ }
+
+ return 0;
+}
+
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -573,6 +593,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
break;
case PHY_INTERFACE_MODE_INTERNAL:
+ if (mac->id == MTK_GMAC2_ID &&
+ MTK_HAS_CAPS(eth->soc->caps, MTK_2P5GPHY)) {
+ err = mtk_gmac_2p5gphy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
break;
default:
goto err_phy;
@@ -644,12 +670,12 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
/* Setup gmac */
- if (mtk_is_netsys_v3_or_greater(eth) &&
- mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ if (mtk_interface_mode_is_xgmii(eth, state->interface)) {
mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
- mtk_setup_bridge_switch(eth);
+ if (mac->id == MTK_GMAC1_ID)
+ mtk_setup_bridge_switch(eth);
}
return;
@@ -696,10 +722,19 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
- mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ if (!mtk_interface_mode_is_xgmii(mac->hw, interface)) {
+ /* GMAC modes */
+ mtk_m32(mac->hw,
+ MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK, 0,
+ MTK_MAC_MCR(mac->id));
+ } else if (mac->id != MTK_GMAC1_ID) {
+ /* XGMAC except for built-in switch */
+ mtk_m32(mac->hw, XMAC_MCR_TRX_DISABLE, XMAC_MCR_TRX_DISABLE,
+ MTK_XMAC_MCR(mac->id));
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id), 0,
+ MTK_XGMAC_STS(mac->id));
+ }
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
@@ -771,13 +806,12 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
-static void mtk_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex, bool tx_pause, bool rx_pause)
+static void mtk_gdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
@@ -811,6 +845,56 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_xgdm_mac_link_up(struct mtk_mac *mac,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ u32 mcr;
+
+ if (mac->id == MTK_GMAC1_ID)
+ return;
+
+ /* Eliminate the interference (before link-up) caused by PHY noise */
+ mtk_m32(mac->hw, XMAC_LOGIC_RST, 0, MTK_XMAC_LOGIC_RST(mac->id));
+ mdelay(20);
+ mtk_m32(mac->hw, XMAC_GLB_CNTCLR, XMAC_GLB_CNTCLR,
+ MTK_XMAC_CNT_CTRL(mac->id));
+
+ mtk_m32(mac->hw, MTK_XGMAC_FORCE_LINK(mac->id),
+ MTK_XGMAC_FORCE_LINK(mac->id), MTK_XGMAC_STS(mac->id));
+
+ mcr = mtk_r32(mac->hw, MTK_XMAC_MCR(mac->id));
+ mcr &= ~(XMAC_MCR_FORCE_TX_FC | XMAC_MCR_FORCE_RX_FC |
+ XMAC_MCR_TRX_DISABLE);
+ /* Configure pause modes -
+ * phylink will avoid these for half duplex
+ */
+ if (tx_pause)
+ mcr |= XMAC_MCR_FORCE_TX_FC;
+ if (rx_pause)
+ mcr |= XMAC_MCR_FORCE_RX_FC;
+
+ mtk_w32(mac->hw, mcr, MTK_XMAC_MCR(mac->id));
+}
+
+static void mtk_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+
+ if (mtk_interface_mode_is_xgmii(mac->hw, interface))
+ mtk_xgdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+ else
+ mtk_gdm_mac_link_up(mac, phy, mode, interface, speed, duplex,
+ tx_pause, rx_pause);
+}
+
static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
@@ -828,6 +912,9 @@ static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
struct mtk_eth *eth = mac->hw;
u32 val;
+ if (mtk_interface_mode_is_xgmii(eth, mac->interface))
+ return -EOPNOTSUPP;
+
/* Tx idle timer in ms */
timer = DIV_ROUND_UP(timer, 1000);
@@ -858,6 +945,7 @@ static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
}
static const struct phylink_mac_ops mtk_phylink_ops = {
+ .mac_prepare = mtk_mac_prepare,
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
@@ -4748,7 +4836,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
if (mtk_is_netsys_v3_or_greater(mac->hw) &&
- MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |
@@ -4768,6 +4856,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink = phylink;
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_2P5GPHY) &&
+ id == MTK_GMAC2_ID)
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ mac->phylink_config.supported_interfaces);
+
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 88ef2e9c50fc..6f72a8c8ae1e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -431,7 +431,8 @@
/* XMAC status registers */
#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
-#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_MODE(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(27) : BIT(11))
#define MTK_USXGMII_PCS_LINK BIT(8)
#define MTK_XGMAC_RX_FC BIT(5)
#define MTK_XGMAC_TX_FC BIT(4)
@@ -524,6 +525,21 @@
#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
#define INTF_MODE_RGMII_10_100 0
+/* XFI Mac control registers */
+#define MTK_XMAC_BASE(x) (0x12000 + (((x) - 1) * 0x1000))
+#define MTK_XMAC_MCR(x) (MTK_XMAC_BASE(x))
+#define XMAC_MCR_TRX_DISABLE 0xf
+#define XMAC_MCR_FORCE_TX_FC BIT(5)
+#define XMAC_MCR_FORCE_RX_FC BIT(4)
+
+/* XFI Mac logic reset registers */
+#define MTK_XMAC_LOGIC_RST(x) (MTK_XMAC_BASE(x) + 0x10)
+#define XMAC_LOGIC_RST BIT(0)
+
+/* XFI Mac count global control */
+#define MTK_XMAC_CNT_CTRL(x) (MTK_XMAC_BASE(x) + 0x100)
+#define XMAC_GLB_CNTCLR BIT(0)
+
/* GPIO port control registers for GMAC 2*/
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -587,6 +603,10 @@
#define GEPHY_MAC_SEL BIT(1)
/* Top misc registers */
+#define TOP_MISC_NETSYS_PCS_MUX 0x0
+#define NETSYS_PCS_MUX_MASK GENMASK(1, 0)
+#define MUX_G2_USXGMII_SEL BIT(1)
+
#define USB_PHY_SWITCH_REG 0x218
#define QPHY_SEL_MASK GENMASK(1, 0)
#define SGMII_QPHY_SEL 0x2
@@ -951,6 +971,7 @@ enum mkt_eth_capabilities {
MTK_RGMII_BIT = 0,
MTK_TRGMII_BIT,
MTK_SGMII_BIT,
+ MTK_2P5GPHY_BIT,
MTK_ESW_BIT,
MTK_GEPHY_BIT,
MTK_MUX_BIT,
@@ -971,6 +992,7 @@ enum mkt_eth_capabilities {
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT,
MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
@@ -980,6 +1002,7 @@ enum mkt_eth_capabilities {
MTK_ETH_PATH_GMAC1_SGMII_BIT,
MTK_ETH_PATH_GMAC2_RGMII_BIT,
MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_2P5GPHY_BIT,
MTK_ETH_PATH_GMAC2_GEPHY_BIT,
MTK_ETH_PATH_GDM1_ESW_BIT,
};
@@ -988,6 +1011,7 @@ enum mkt_eth_capabilities {
#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_2P5GPHY BIT_ULL(MTK_2P5GPHY_BIT)
#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
@@ -1010,6 +1034,8 @@ enum mkt_eth_capabilities {
BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC2_TO_2P5GPHY \
+ BIT_ULL(MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT)
#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
@@ -1021,6 +1047,7 @@ enum mkt_eth_capabilities {
#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_2P5GPHY BIT_ULL(MTK_ETH_PATH_GMAC2_2P5GPHY_BIT)
#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
@@ -1030,6 +1057,7 @@ enum mkt_eth_capabilities {
#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GMAC2_2P5GPHY (MTK_ETH_PATH_GMAC2_2P5GPHY | MTK_2P5GPHY)
#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
/* MUXes present on SoCs */
@@ -1049,6 +1077,10 @@ enum mkt_eth_capabilities {
(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
MTK_SHARED_SGMII)
+/* 2: GMAC2 -> 2P5GPHY */
+#define MTK_MUX_GMAC2_TO_2P5GPHY \
+ (MTK_ETH_MUX_GMAC2_TO_2P5GPHY | MTK_MUX | MTK_INFRA)
+
/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
@@ -1084,8 +1116,9 @@ enum mkt_eth_capabilities {
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
MTK_RSTCTRL_PPE1 | MTK_SRAM)
-#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
- MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)
+#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_GMAC2_2P5GPHY | \
+ MTK_MUX_GMAC2_TO_2P5GPHY | MTK_QDMA | MTK_RSTCTRL_PPE1 | \
+ MTK_RSTCTRL_PPE2 | MTK_SRAM)
struct mtk_tx_dma_desc_info {
dma_addr_t addr;
@@ -1145,7 +1178,7 @@ struct mtk_reg_map {
};
/* struct mtk_eth_data - This is the structure holding all differences
- * among various plaforms
+ * among various platforms
* @reg_map Soc register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
@@ -1245,7 +1278,7 @@ struct mtk_soc_data {
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
* @state: Initialization and runtime state of the device
- * @soc: Holding specific data among vaious SoCs
+ * @soc: Holding specific data among various SoCs
*/
struct mtk_eth {
@@ -1437,6 +1470,23 @@ static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
return MTK_FOE_IB2_MULTICAST;
}
+static inline bool mtk_interface_mode_is_xgmii(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ return false;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_5GBASER:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
@@ -1445,6 +1495,7 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_2p5gphy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
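
Note: the capability scheme in this header packs every path, mux, and feature flag into a single u64 — which is why mtk_eth_muxc.cap_bit had to grow from int to u64 in the mtk_eth_path.c hunk: bits such as MTK_ETH_MUX_GMAC2_TO_2P5GPHY_BIT sit above bit 31, so BIT() would overflow. A host-buildable sketch of the composition/test pattern (bit positions and macro names here are illustrative; upstream's MTK_HAS_CAPS performs the same all-bits-present check):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)        (1ULL << (n))
    #define HAS_CAPS(caps, f) (((caps) & (f)) == (f))

    enum { SGMII_BIT = 2, P2P5GPHY_BIT = 3, MUX_BIT = 6,
           MUX_GMAC2_TO_2P5GPHY_BIT = 40 };    /* > 31: needs BIT_ULL */

    int main(void)
    {
            uint64_t mux_g2 = BIT_ULL(MUX_GMAC2_TO_2P5GPHY_BIT) | BIT_ULL(MUX_BIT);
            uint64_t soc    = mux_g2 | BIT_ULL(P2P5GPHY_BIT);

            /* composite test: every bit of the composite must be present */
            printf("%d\n", HAS_CAPS(soc, mux_g2));             /* 1 */
            printf("%d\n", HAS_CAPS(soc, BIT_ULL(SGMII_BIT))); /* 0 */
            return 0;
    }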
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index e212a4ba9275..351dd152f4f3 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -2000,7 +2000,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
if (mtk_wed_is_v3_or_greater(dev->hw))
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
- /* initail tx interrupt trigger */
+ /* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
@@ -2011,7 +2011,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
dev->wlan.tx_tbit[1]));
- /* initail txfree interrupt trigger */
+ /* initial txfree interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index cd17a3f4faf8..a68cd3f0304c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1897,6 +1897,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d7444782bfdd..698a5d1f0d7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -106,7 +106,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
GFP_KERNEL);
buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
GFP_KERNEL);
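
Note: the mr.c hunk is the classic sizeof idiom fix. buddy->bits is an array of unsigned long *, and on the usual 64-bit ABIs sizeof(long *) happens to equal sizeof(*buddy->bits), so the old code worked by coincidence; spelling it sizeof(*ptr) keeps the allocation correct if the element type ever changes. A host-buildable illustration:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned long **bits;
            size_t n = 8;

            /* sizeof(*bits) tracks the element type automatically;
             * sizeof(long *) only matches it by accident of ABI.
             */
            bits = calloc(n, sizeof(*bits));
            if (!bits)
                    return 1;
            printf("%zu == %zu\n", sizeof(*bits), sizeof(unsigned long *));
            free(bits);
            return 0;
    }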
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 568bbe5f83f5..d292e6a9e22c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -154,7 +154,8 @@ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
steering/hws/vport.o \
steering/hws/bwc_complex.o \
steering/hws/fs_hws_pools.o \
- steering/hws/fs_hws.o
+ steering/hws/fs_hws.o \
+ steering/hws/action_ste_pool.o
#
# SF device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e53dbdc0a7a1..b1aeea7c4a91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -927,8 +927,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
static void cb_timeout_handler(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_cmd_work_ent *ent = container_of(dwork,
struct mlx5_cmd_work_ent,
cb_timeout_work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 32ed4963b8ad..5b0d03b3efe8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -520,6 +520,12 @@ struct mlx5e_xdpsq {
struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
+struct mlx5e_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+};
+
struct mlx5e_ktls_resync_resp;
struct mlx5e_icosq {
@@ -716,6 +722,7 @@ struct mlx5e_rq {
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
+ struct mlx5e_xdp_buff mxbuf;
/* AF_XDP zero-copy */
struct xsk_buff_pool *xsk_pool;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 81523825faa2..cb972b2d46e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -114,6 +114,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
int err = 0;
rtnl_lock();
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
@@ -123,6 +124,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
out:
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
return err;
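
Note: this series threads the new per-netdev instance lock through the mlx5e recovery and reconfiguration paths. The consistent nesting here is rtnl_lock() outermost, then netdev_lock(), then the driver's priv->state_lock, with the unlocks in reverse order. A minimal sketch of that ordering, assuming the <net/netdev_lock.h> helpers used throughout these hunks:

    int recover_sketch(struct mlx5e_priv *priv)
    {
            int err = 0;

            rtnl_lock();                      /* outermost: global rtnl lock   */
            netdev_lock(priv->netdev);        /* per-device instance/ops lock  */
            mutex_lock(&priv->state_lock);    /* innermost: driver state       */

            /* ... recovery work that may touch channels and NAPI ... */

            mutex_unlock(&priv->state_lock);
            netdev_unlock(priv->netdev);
            rtnl_unlock();
            return err;
    }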
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 131ed97ca997..5d0014129a7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -8,6 +8,7 @@
#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <net/netdev_lock.h>
struct mlx5e_ptp_fs {
struct mlx5_flow_handle *l2_rule;
@@ -449,8 +450,22 @@ static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
struct mlx5e_ptpsq *ptpsq =
container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ /* Recovering the PTP SQ means re-enabling NAPI, which requires the
+ * netdev instance lock. However, SQ closing has to wait for this work
+ * task to finish while also holding the same lock. So either get the
+ * lock or find that the SQ is no longer enabled and thus this work is
+ * not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
+ netdev_unlock(sq->netdev);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
@@ -892,7 +907,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (err)
goto err_free;
- netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);
+ netif_napi_add_locked(netdev, &c->napi, mlx5e_ptp_napi_poll);
mlx5e_ptp_build_params(c, cparams, params);
@@ -910,7 +925,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
return 0;
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
err_free:
kvfree(cparams);
kvfree(c);
@@ -920,7 +935,7 @@ err_free:
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
mlx5e_ptp_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -929,7 +944,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
@@ -957,7 +972,7 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
}
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
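
Note: the PTP worker above (and mlx5e_tx_err_cqe_work() later in this series) cannot simply block on netdev_lock(), because the queue-close path holds that lock while flushing these very work items — a plain lock would deadlock. The comment in the hunk spells out the resulting pattern: spin on netdev_trylock() and bail out as soon as the SQ is observed disabled. Distilled into a standalone sketch (do_recovery_sketch is a placeholder):

    static void do_recovery_sketch(struct mlx5e_txqsq *sq);   /* placeholder */

    static void recover_work_sketch(struct mlx5e_txqsq *sq)
    {
            /* close path: holds the netdev lock, clears ENABLED, flushes work */
            while (!netdev_trylock(sq->netdev)) {
                    if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
                            return;        /* close won the race: nothing to do */
                    msleep(20);            /* back off and retry */
            }

            /* locked: safe to re-create and re-enable NAPI for this SQ */
            do_recovery_sketch(sq);
            netdev_unlock(sq->netdev);
    }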
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index dbd9482359e1..c3bda4612fa9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -107,9 +107,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
mlx5e_reset_txqsq_cc_pc(sq);
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
- rtnl_lock();
mlx5e_activate_txqsq(sq);
- rtnl_unlock();
if (sq->channel)
mlx5e_trigger_napi_icosq(sq->channel);
@@ -176,7 +174,6 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
priv = ptpsq->txqsq.priv;
- rtnl_lock();
mutex_lock(&priv->state_lock);
chs = &priv->channels;
netdev = priv->netdev;
@@ -196,7 +193,6 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
netif_carrier_on(netdev);
mutex_unlock(&priv->state_lock);
- rtnl_unlock();
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 140606fcd23b..b5c19396e096 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -149,7 +149,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
- netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll);
+ netif_napi_add_locked(netdev, &t->napi, mlx5e_trap_napi_poll);
err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
@@ -164,7 +164,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
err_close_trap_rq:
mlx5e_close_trap_rq(&t->rq);
err_napi_del:
- netif_napi_del(&t->napi);
+ netif_napi_del_locked(&t->napi);
kvfree(t);
return ERR_PTR(err);
}
@@ -173,13 +173,13 @@ void mlx5e_close_trap(struct mlx5e_trap *trap)
{
mlx5e_tir_destroy(&trap->tir);
mlx5e_close_trap_rq(&trap->rq);
- netif_napi_del(&trap->napi);
+ netif_napi_del_locked(&trap->napi);
kvfree(trap);
}
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
- napi_enable(&trap->napi);
+ napi_enable_locked(&trap->napi);
mlx5e_activate_rq(&trap->rq);
mlx5e_trigger_napi_sched(&trap->napi);
}
@@ -189,7 +189,7 @@ void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
struct mlx5e_trap *trap = priv->en_trap;
mlx5e_deactivate_rq(&trap->rq);
- napi_disable(&trap->napi);
+ napi_disable_locked(&trap->napi);
}
static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
@@ -285,6 +285,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ netdev_lock(priv->netdev);
switch (trap_ctx->action) {
case DEVLINK_TRAP_ACTION_TRAP:
err = mlx5e_handle_action_trap(priv, trap_ctx->id);
@@ -297,6 +298,7 @@ int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_
trap_ctx->action);
err = -EINVAL;
}
+ netdev_unlock(priv->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index f803e1c93590..5ce1b463b7a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -707,8 +707,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
page = xdpi.page.page;
- /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
- * as we know this is a page_pool page.
+ /* No need to check page_pool_page_is_pp() as we
+ * know this is a page_pool page.
*/
page_pool_recycle_direct(page->pp, page);
} while (++n < num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 446e492c6bb8..46ab0a9e8cdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -45,12 +45,6 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
-struct mlx5e_xdp_buff {
- struct xdp_buff xdp;
- struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
-};
-
/* XDP packets can be transmitted in different ways. On completion, we need to
* distinguish between them to clean up things in a proper way.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 2dd842aac6fc..77f61cd28a79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -259,8 +259,7 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
- struct xfrm_state *x = sa_entry->x;
- struct net_device *netdev;
+ struct net_device *netdev = sa_entry->dev;
struct neighbour *n;
u8 addr[ETH_ALEN];
const void *pkey;
@@ -270,8 +269,6 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->type != XFRM_DEV_OFFLOAD_PACKET)
return;
- netdev = x->xso.real_dev;
-
mlx5_query_mac_address(mdev, addr);
switch (attrs->dir) {
case XFRM_DEV_OFFLOAD_IN:
@@ -692,17 +689,17 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
return 0;
}
-static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+static int mlx5e_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
- struct net_device *netdev = x->xso.real_dev;
struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
gfp_t gfp;
int err;
- priv = netdev_priv(netdev);
+ priv = netdev_priv(dev);
if (!priv->ipsec)
return -EOPNOTSUPP;
@@ -713,6 +710,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
return -ENOMEM;
sa_entry->x = x;
+ sa_entry->dev = dev;
sa_entry->ipsec = ipsec;
/* Check if this SA is originated from acquire flow temporary SA */
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
@@ -809,7 +807,7 @@ err_xfrm:
return err;
}
-static void mlx5e_xfrm_del_state(struct xfrm_state *x)
+static void mlx5e_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -822,7 +820,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
WARN_ON(old != sa_entry);
}
-static void mlx5e_xfrm_free_state(struct xfrm_state *x)
+static void mlx5e_xfrm_free_state(struct net_device *dev, struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
@@ -855,8 +853,6 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5e_ipsec *ipsec;
struct neighbour *n = ptr;
- struct net_device *netdev;
- struct xfrm_state *x;
unsigned long idx;
if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
@@ -876,11 +872,9 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
continue;
}
- x = sa_entry->x;
- netdev = x->xso.real_dev;
data = sa_entry->work->data;
- neigh_ha_snapshot(data->addr, n, netdev);
+ neigh_ha_snapshot(data->addr, n, sa_entry->dev);
queue_work(ipsec->wq, &sa_entry->work->work);
}
@@ -996,8 +990,8 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
size_t headers;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
+ lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&net->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
@@ -1170,7 +1164,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xdo.real_dev;
+ struct net_device *netdev = x->xdo.dev;
struct mlx5e_ipsec_pol_entry *pol_entry;
struct mlx5e_priv *priv;
int err;
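
Note: the ipsec.c hunks track an xfrm offload API change in which the core now passes the offloading net_device to the state callbacks explicitly, instead of the driver digging it out of x->xso.real_dev; the driver caches it in sa_entry->dev at add time and uses it for neighbour snapshots and teardown. A sketch of the callback wiring implied by the new prototypes (the ops-struct initializer below is an assumption based on the signatures visible in this diff, not quoted from it):

    static const struct xfrmdev_ops sketch_xfrmdev_ops = {
            .xdo_dev_state_add    = mlx5e_xfrm_add_state,   /* (dev, x, extack) */
            .xdo_dev_state_delete = mlx5e_xfrm_del_state,   /* (dev, x)         */
            .xdo_dev_state_free   = mlx5e_xfrm_free_state,  /* (dev, x)         */
    };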
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index a63c2289f8af..ffcd0cdeb775 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -274,6 +274,7 @@ struct mlx5e_ipsec_limits {
struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_esn_state esn_state;
struct xfrm_state *x;
+ struct net_device *dev;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8705cffc747f..5fe016e477b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1147,6 +1147,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
bool reset = true;
int err;
+ netdev_lock(priv->netdev);
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
@@ -1162,6 +1163,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
&trust_state, reset);
mutex_unlock(&priv->state_lock);
+ netdev_unlock(priv->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index fdf9e9bb99ac..ea078c9f5d15 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1689,6 +1689,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
return 0;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -2059,14 +2060,9 @@ int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
if (err)
return err;
- dev_hold(dev);
- rtnl_unlock();
-
err = mlx5_firmware_flash(mdev, fw, NULL);
release_firmware(fw);
- rtnl_lock();
- dev_put(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 05058710d2c7..04a969128161 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -484,7 +484,9 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
}
/* Need to fix some features.. */
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
return err;
}
@@ -521,7 +523,9 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
clear_bit(vid, fs->vlan->active_svlans);
mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 3506024c2453..ea822c69d137 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,7 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
@@ -1903,7 +1904,20 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
+ /* Recovering queues means re-enabling NAPI, which requires the netdev
+ * instance lock. However, SQ closing flows have to wait for work tasks
+ * to finish while also holding the netdev instance lock. So either get
+ * the lock or find that the SQ is no longer enabled and thus this work
+ * is not relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return;
+ msleep(20);
+ }
+
mlx5e_reporter_tx_err_cqe(sq);
+ netdev_unlock(sq->netdev);
}
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
@@ -2705,8 +2719,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
- netif_napi_add_config(netdev, &c->napi, mlx5e_napi_poll, ix);
- netif_napi_set_irq(&c->napi, irq);
+ netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
+ netif_napi_set_irq_locked(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2728,7 +2742,7 @@ err_close_queues:
mlx5e_close_queues(c);
err_napi_del:
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
err_free:
kvfree(cparam);
@@ -2741,7 +2755,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
int tc;
- napi_enable(&c->napi);
+ napi_enable_locked(&c->napi);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
@@ -2773,7 +2787,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_txqsq(&c->sq[tc]);
mlx5e_qos_deactivate_queues(c);
- napi_disable(&c->napi);
+ napi_disable_locked(&c->napi);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2782,7 +2796,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
mlx5e_qos_close_queues(c);
- netif_napi_del(&c->napi);
+ netif_napi_del_locked(&c->napi);
kvfree(c);
}
@@ -4276,7 +4290,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
if (!netdev->netdev_ops->ndo_bpf ||
params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
- xdp_clear_features_flag(netdev);
+ xdp_set_features_flag_locked(netdev, 0);
return;
}
@@ -4285,7 +4299,7 @@ void mlx5e_set_xdp_feature(struct net_device *netdev)
NETDEV_XDP_ACT_RX_SG |
NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG;
- xdp_set_features_flag(netdev, val);
+ xdp_set_features_flag_locked(netdev, val);
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
@@ -4349,6 +4363,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_MACSEC;
+ if (netdev->features & NETIF_F_HW_MACSEC)
+ netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
+
return features;
}
@@ -4964,21 +4982,19 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues
- * through this flow. However, channel closing flows have to wait for
- * this work to finish while holding rtnl lock too. So either get the
- * lock or find that channels are being closed for other reason and
- * this work is not relevant anymore.
+ /* Recovering the TX queues implies re-enabling NAPI, which requires
+ * the netdev instance lock.
+ * However, channel closing flows have to wait for this work to finish
+ * while holding the same lock. So either get the lock or find that
+ * channels are being closed for other reason and this work is not
+ * relevant anymore.
*/
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(netdev)) {
if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
return;
msleep(20);
}
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
-
for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue =
netdev_get_tx_queue(netdev, i);
@@ -4992,8 +5008,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
break;
}
-unlock:
- rtnl_unlock();
+ netdev_unlock(netdev);
}
static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -5317,7 +5332,6 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
struct mlx5e_rq_stats *xskrq_stats;
struct mlx5e_rq_stats *rq_stats;
- ASSERT_RTNL();
if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
return;
@@ -5337,7 +5351,6 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_sq_stats *sq_stats;
- ASSERT_RTNL();
if (!priv->stats_nch)
return;
@@ -5358,7 +5371,6 @@ static void mlx5e_get_base_stats(struct net_device *dev,
struct mlx5e_ptp *ptp_channel;
int i, tc;
- ASSERT_RTNL();
if (!mlx5e_is_uplink_rep(priv)) {
rx->packets = 0;
rx->bytes = 0;
@@ -5454,6 +5466,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
mlx5e_dcbnl_build_netdev(netdev);
@@ -5835,9 +5849,11 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_nic_set_rx_mode(priv);
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -5850,9 +5866,16 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_dcbnl_delete_app(priv);
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ if (priv->en_trap) {
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+ }
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_nic_set_rx_mode(priv);
@@ -5862,11 +5885,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_blocking_events(priv);
- if (priv->en_trap) {
- mlx5e_deactivate_trap(priv);
- mlx5e_close_trap(priv->en_trap);
- priv->en_trap = NULL;
- }
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -6121,7 +6139,9 @@ static void mlx5e_update_features(struct net_device *netdev)
return; /* features will be updated on netdev registration */
rtnl_lock();
+ netdev_lock(netdev);
netdev_update_features(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
}
@@ -6132,7 +6152,7 @@ static void mlx5e_reset_channels(struct net_device *netdev)
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
- const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
+ const bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED;
const struct mlx5e_profile *profile = priv->profile;
int max_nch;
int err;
@@ -6174,15 +6194,19 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
* 2. Set our default XPS cpumask.
* 3. Build the RQT.
*
- * rtnl_lock is required by netif_set_real_num_*_queues in case the
+ * Locking is required by netif_set_real_num_*_queues in case the
* netdev has been registered by this point (if this function was called
* in the reload or resume flow).
*/
- if (take_rtnl)
+ if (need_lock) {
rtnl_lock();
+ netdev_lock(priv->netdev);
+ }
err = mlx5e_num_channels_changed(priv);
- if (take_rtnl)
+ if (need_lock) {
+ netdev_unlock(priv->netdev);
rtnl_unlock();
+ }
if (err)
goto out;
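
Note: mlx5e_attach_netdev() renames take_rtnl to need_lock because both locks are now required — but only once the netdev is registered. Before register_netdev() nothing else can reach the device, so netif_set_real_num_*_queues() may run bare; in the reload/resume flow the device is live and both rtnl and the instance lock must be held. The conditional pattern, condensed from the hunk above:

    static int attach_sketch(struct mlx5e_priv *priv)
    {
            bool need_lock = priv->netdev->reg_state == NETREG_REGISTERED;
            int err;

            if (need_lock) {                 /* reload/resume: device is live */
                    rtnl_lock();
                    netdev_lock(priv->netdev);
            }
            err = mlx5e_num_channels_changed(priv);
            if (need_lock) {
                    netdev_unlock(priv->netdev);
                    rtnl_unlock();
            }
            return err;
    }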
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2abab241f03b..63a7a788fb0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -33,6 +33,7 @@
#include <linux/dim.h>
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
@@ -803,6 +804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -885,6 +887,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
{
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
eth_hw_addr_random(netdev);
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
@@ -1344,9 +1348,11 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
netdev->wanted_features |= NETIF_F_HW_TC;
rtnl_lock();
+ netdev_lock(netdev);
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
+ netdev_unlock(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -1356,9 +1362,11 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
rtnl_lock();
+ netdev_lock(priv->netdev);
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
+ netdev_unlock(priv->netdev);
rtnl_unlock();
mlx5e_rep_bridge_cleanup(priv);
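
Two of the representor hunks are setup rather than locking: mlx5e_build_rep_netdev() now sets request_ops_lock and installs per-device lockdep classes, which is what makes the core hold netdev_lock() around the ndo_* callbacks that the other hunks rely on. A minimal sketch of that opt-in, with my_netdev_ops as a placeholder for a driver's real ops table:

    /* Sketch: opting a netdev into core-managed ops locking. */
    static void my_build_netdev(struct net_device *netdev)
    {
        netdev->netdev_ops = &my_netdev_ops;
        /* Core now takes netdev_lock() around ndo_* calls. */
        netdev->request_ops_lock = true;
        /* Per-device lockdep keys avoid false nesting reports. */
        netdev_lockdep_set_classes(netdev);
    }
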
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5fd70b4d55be..84b1ab8233b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1684,17 +1684,17 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf))
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1713,11 +1713,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
struct mlx5e_wqe_frag_info *head_wi = wi;
u16 rx_headroom = rq->buff.headroom;
struct mlx5e_frag_page *frag_page;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -1737,8 +1737,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetch(va + rx_headroom);
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- frag_consumed_bytes, &mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ frag_consumed_bytes, mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1750,8 +1750,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page,
- wi->offset, frag_consumed_bytes);
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, wi->offset,
+ frag_consumed_bytes);
truesize += frag_info->frag_stride;
cqe_bcnt -= frag_consumed_bytes;
@@ -1760,7 +1761,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_wqe_frag_info *pwi;
@@ -1770,21 +1771,23 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
- mxbuf.xdp.data_end - mxbuf.xdp.data,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
+ mxbuf->xdp.data_end - mxbuf->xdp.data,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb))
return NULL;
skb_mark_for_recycle(skb);
head_wi->frag_page->frags++;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
pwi->frag_page->frags++;
@@ -1984,10 +1987,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
u32 frag_offset = head_offset;
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
- struct mlx5e_xdp_buff mxbuf;
unsigned int truesize = 0;
struct bpf_prog *prog;
struct sk_buff *skb;
@@ -2033,9 +2036,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
}
- mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz, linear_data_len, &mxbuf);
+ mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
+ linear_data_len, mxbuf);
- sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
@@ -2046,7 +2050,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
else
truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf.xdp, frag_page, frag_offset,
+ mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
+ frag_page, frag_offset,
pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
@@ -2054,7 +2059,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
@@ -2067,10 +2072,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start,
- linear_frame_sz,
- mxbuf.xdp.data - mxbuf.xdp.data_hard_start, 0,
- mxbuf.xdp.data - mxbuf.xdp.data_meta);
+ skb = mlx5e_build_linear_skb(
+ rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+ mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq, &wi->linear_page);
return NULL;
@@ -2080,13 +2085,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
wi->linear_page.frags++;
mlx5e_page_release_fragmented(rq, &wi->linear_page);
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, frag_page - head_page,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
pagep = head_page;
do
@@ -2097,12 +2103,13 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
} else {
dma_addr_t addr;
- if (xdp_buff_has_frags(&mxbuf.xdp)) {
+ if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
xdp_update_skb_shared_info(skb, sinfo->nr_frags,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
+ xdp_buff_is_frag_pfmemalloc(
+ &mxbuf->xdp));
pagep = frag_page - sinfo->nr_frags;
do
@@ -2152,20 +2159,20 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct mlx5e_xdp_buff mxbuf;
+ struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
- cqe_bcnt, &mxbuf);
- if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
+ cqe_bcnt, mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
frag_page->frags++;
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
- metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
- cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
+ metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
+ cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
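
All four RX hunks in en_rx.c make the same substitution: the struct mlx5e_xdp_buff that used to be built on the stack for every packet is replaced with a pointer to a buffer embedded in the RQ (rq->mxbuf). An RQ handles one packet at a time inside its NAPI poll, so a single per-queue buffer is safe, and the change trims a sizable object out of several hot stack frames. The shape of the change, assuming the new member in struct mlx5e_rq:

    /* Sketch: per-RQ XDP buffer instead of a per-packet stack copy.
     * Assumes struct mlx5e_rq gained the mxbuf member shown here.
     */
    struct mlx5e_rq_sketch {
        /* ... existing RQ fields ... */
        struct mlx5e_xdp_buff mxbuf; /* reused for every CQE */
    };

    /* before: struct mlx5e_xdp_buff mxbuf;               (stack)  */
    /* after:  struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf; (per RQ) */
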
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1c121b435016..19664fa7f217 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -2424,8 +2424,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- ethtool_sprintf(data, ptp_rq_stats_desc[i].format,
- MLX5E_PTP_CHANNEL_IX);
+ ethtool_puts(data, ptp_rq_stats_desc[i].format);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 8de6fcbd3a03..def5dea1463d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -54,7 +54,7 @@
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
-#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq0_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
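
The two stats hunks are one logical change: the PTP RQ index is effectively constant (the template now hardcodes 0 where it used to interpolate MLX5E_PTP_CHANNEL_IX), so the string needs no printf expansion and ethtool_puts() can replace ethtool_sprintf(). In general:

    /* Sketch: ethtool_puts() for fixed stat names, ethtool_sprintf()
     * only where a runtime index is still interpolated.
     */
    ethtool_puts(data, "ptp_rq0_packets");        /* fixed name   */
    ethtool_sprintf(data, "ch%d_packets", ch_ix); /* indexed name */
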
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 4fd853d19e31..55a8629f0792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -337,10 +337,11 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
};
}
-static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
+static void mlx5e_tx_skb_update_ts_flags(struct sk_buff *skb)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
}
static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
@@ -392,7 +393,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
sq->pc += wi->num_wqebbs;
@@ -625,7 +626,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
- mlx5e_tx_skb_update_hwts_flags(skb);
+ mlx5e_tx_skb_update_ts_flags(skb);
if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
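
The en_tx.c rename is not cosmetic: the helper now also calls skb_tx_timestamp(), which emits a software TX timestamp (and gives PHY timestamping a chance to run) at the point the skb is committed to hardware, alongside the existing hardware-timestamp flagging. Drivers are expected to call it just before ringing the doorbell; doing it inside the shared helper covers both the regular and MPWQE paths. The resulting helper, in isolation:

    /* Sketch of the combined helper: flag a pending hardware
     * timestamp when one was requested, then let the core emit a
     * software timestamp if the socket asked for one.
     */
    static void tx_update_ts_flags_sketch(struct sk_buff *skb)
    {
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
            skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        skb_tx_timestamp(skb);
    }
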
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a47c29571f64..1af76da8b132 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -527,7 +527,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
- int reformat_id = 0;
+ u32 reformat_id = 0;
unsigned int inlen;
int dst_cnt_size;
u32 *in, action;
@@ -580,23 +580,21 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, action, action);
if (!extended_dest && fte->act_dests.action.pkt_reformat) {
- struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
-
- if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
- reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
- if (reformat_id < 0) {
- mlx5_core_err(dev,
- "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
- pkt_reformat->reformat_type);
- err = reformat_id;
- goto err_out;
- }
- } else {
- reformat_id = fte->act_dests.action.pkt_reformat->id;
+ struct mlx5_pkt_reformat *pkt_reformat =
+ fte->act_dests.action.pkt_reformat;
+
+ err = mlx5_fs_get_packet_reformat_id(pkt_reformat,
+ &reformat_id);
+ if (err) {
+ mlx5_core_err(dev,
+ "Unsupported pkt_reformat type (%d)\n",
+ pkt_reformat->reformat_type);
+ goto err_out;
}
}
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ reformat_id);
if (fte->act_dests.action.modify_hdr) {
if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 6163bc98d94a..23a7e8e7adfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1830,14 +1830,35 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id)
+{
+ switch (pkt_reformat->owner) {
+ case MLX5_FLOW_RESOURCE_OWNER_FW:
+ *id = pkt_reformat->id;
+ return 0;
+ case MLX5_FLOW_RESOURCE_OWNER_SW:
+ return mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat, id);
+ case MLX5_FLOW_RESOURCE_OWNER_HWS:
+ return mlx5_fs_hws_action_get_pkt_reformat_id(pkt_reformat, id);
+ default:
+ return -EINVAL;
+ }
+}
+
static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
struct mlx5_pkt_reformat *p2)
{
- return p1->owner == p2->owner &&
- (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
- p1->id == p2->id :
- mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
- mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+ int err1, err2;
+ u32 id1, id2;
+
+ if (p1->owner != p2->owner)
+ return false;
+
+ err1 = mlx5_fs_get_packet_reformat_id(p1, &id1);
+ err2 = mlx5_fs_get_packet_reformat_id(p2, &id2);
+
+ return !err1 && !err2 && id1 == id2;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
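
The fs_cmd.c and fs_core.c hunks fix an error-signalling flaw together: the old call site smuggled errors through a signed reformat ID, which only worked for the SW-owner case and was truncated by the (u32) cast. The new mlx5_fs_get_packet_reformat_id() keeps the u32 ID and the int error code separate and adds the HWS owner. The call site reduces to this sketch:

    /* Sketch: ID and error code travel separately now, via the
     * mlx5_fs_get_packet_reformat_id() helper added above.
     */
    u32 reformat_id = 0;
    int err;

    if (!extended_dest && fte->act_dests.action.pkt_reformat) {
        err = mlx5_fs_get_packet_reformat_id(
                fte->act_dests.action.pkt_reformat, &reformat_id);
        if (err)
            goto err_out;
    }
    MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
             reformat_id);
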
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 0767239f651c..500826229b0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -58,6 +58,7 @@ struct mlx5_flow_definer {
enum mlx5_flow_resource_owner {
MLX5_FLOW_RESOURCE_OWNER_FW,
MLX5_FLOW_RESOURCE_OWNER_SW,
+ MLX5_FLOW_RESOURCE_OWNER_HWS,
};
struct mlx5_modify_hdr {
@@ -386,6 +387,9 @@ u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace
struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
+int mlx5_fs_get_packet_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *id);
+
#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
#define fs_list_for_each_entry(pos, root) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0979d672d47f..79ae3a51a4b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -32,6 +32,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
+#include <net/netdev_lock.h>
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
@@ -102,6 +103,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
netdev->netdev_ops = &mlx5i_netdev_ops;
netdev->ethtool_ops = &mlx5i_ethtool_ops;
+ netdev->request_ops_lock = true;
+ netdev_lockdep_set_classes(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 65a94e46edcf..cec18efadc73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -643,13 +643,6 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
int pin = -1;
int err = 0;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -820,12 +813,6 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo
return 0;
}
-static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
-{
- return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
- (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
-}
-
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
@@ -861,12 +848,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
goto unlock;
}
- /* Reject requests with unsupported flags */
- if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) {
- err = -EOPNOTSUPP;
- goto unlock;
- }
-
if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
@@ -1034,6 +1015,13 @@ static void mlx5_init_pin_config(struct mlx5_core_dev *mdev)
clock->ptp_info.verify = mlx5_ptp_verify;
clock->ptp_info.pps = 1;
+ clock->ptp_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+
+ if (mlx5_npps_real_time_supported(mdev))
+ clock->ptp_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
+
for (i = 0; i < clock->ptp_info.n_pins; i++) {
snprintf(clock->ptp_info.pin_config[i].name,
sizeof(clock->ptp_info.pin_config[i].name),
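
The clock.c hunks delete the driver's open-coded flag screening in favor of the declarative supported_extts_flags / supported_perout_flags fields of struct ptp_clock_info: the PTP core rejects requests carrying flags the driver did not advertise before .enable() is ever invoked. Declaring capabilities once at init is all that remains, as this condensed restatement of the hunk shows:

    /* Sketch: advertise supported request flags up front and let the
     * PTP core perform the validation the driver used to open-code.
     */
    clock->ptp_info.supported_extts_flags = PTP_RISING_EDGE |
                                            PTP_FALLING_EDGE |
                                            PTP_STRICT_FLAGS;
    if (mlx5_npps_real_time_supported(mdev))
        clock->ptp_info.supported_perout_flags =
            PTP_PEROUT_DUTY_CYCLE;
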
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 2c5f850c31f6..40024cfa3099 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -148,7 +148,7 @@ out:
* Free the IRQ and other resources such as rmap from the system.
* BUT doesn't free or remove reference from mlx5.
* This function is very important for the shutdown flow, where we need to
- * cleanup system resoruces but keep mlx5 objects alive,
+ * cleanup system resources but keep mlx5 objects alive,
* see mlx5_irq_table_free_irqs().
*/
static void mlx5_system_free_irq(struct mlx5_irq *irq)
@@ -588,7 +588,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
struct mlx5_irq *irq;
unsigned long index;
- /* There are cases in which we are destrying the irq_table before
+ /* There are cases in which we are destroying the irq_table before
* freeing all the IRQs, fast teardown for example. Hence, free the irqs
* which might not have been freed.
*/
@@ -617,7 +617,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec,
if (!mlx5_sf_max_functions(dev))
return 0;
if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
- mlx5_core_dbg(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+ mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index b5332c54d4fb..fb62f3bc4bd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -72,6 +72,11 @@ enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
return action->type;
}
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action)
+{
+ return action->ctx->mdev;
+}
+
static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
enum mlx5hws_context_shared_stc_type stc_type,
u8 tbl_type)
@@ -238,6 +243,7 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
enum mlx5hws_table_type table_type,
bool is_mirror)
{
+ struct mlx5hws_pool *pool;
bool use_fixup = false;
u32 fw_tbl_type;
u32 base_id;
@@ -253,13 +259,11 @@ hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
use_fixup = true;
break;
}
+ pool = stc_attr->ste_table.ste_pool;
if (!is_mirror)
- base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
- &stc_attr->ste_table.ste);
+ base_id = mlx5hws_pool_get_base_id(pool);
else
- base_id =
- mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
- &stc_attr->ste_table.ste);
+ base_id = mlx5hws_pool_get_base_mirror_id(pool);
*fixup_stc_attr = *stc_attr;
fixup_stc_attr->ste_table.ste_obj_id = base_id;
@@ -337,7 +341,7 @@ __must_hold(&ctx->ctrl_lock)
if (!mlx5hws_context_cap_dynamic_reparse(ctx))
stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ obj_0_id = mlx5hws_pool_get_base_id(stc_pool);
/* According to table/action limitation change the stc_attr */
use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
@@ -353,7 +357,7 @@ __must_hold(&ctx->ctrl_lock)
if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
u32 obj_1_id;
- obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ obj_1_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
&fixup_stc_attr,
@@ -393,11 +397,11 @@ __must_hold(&ctx->ctrl_lock)
stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
stc_attr.stc_offset = stc->offset;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
}
@@ -1186,14 +1190,15 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
struct mlx5hws_action_mh_pattern *pattern,
u32 log_bulk_size)
{
+ u16 num_actions, max_mh_actions = 0, hw_max_actions;
struct mlx5hws_context *ctx = action->ctx;
- u16 num_actions, max_mh_actions = 0;
int i, ret, size_in_bytes;
u32 pat_id, arg_id = 0;
__be64 *new_pattern;
size_t pat_max_sz;
pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ hw_max_actions = pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE;
size_in_bytes = pat_max_sz * sizeof(__be64);
new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
if (!new_pattern)
@@ -1203,16 +1208,20 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
for (i = 0; i < num_of_patterns; i++) {
size_t new_num_actions;
size_t cur_num_actions;
- u32 nope_location;
+ u32 nop_locations;
cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
- mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
- pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
- &new_num_actions, &nope_location,
- &new_pattern[i * pat_max_sz]);
+ ret = mlx5hws_pat_calc_nop(pattern[i].data, cur_num_actions,
+ hw_max_actions, &new_num_actions,
+ &nop_locations,
+ &new_pattern[i * pat_max_sz]);
+ if (ret) {
+ mlx5hws_err(ctx, "Too many actions after nop insertion\n");
+ goto free_new_pat;
+ }
- action[i].modify_header.nope_locations = nope_location;
+ action[i].modify_header.nop_locations = nop_locations;
action[i].modify_header.num_of_actions = new_num_actions;
max_mh_actions = max(max_mh_actions, new_num_actions);
@@ -1259,7 +1268,7 @@ hws_action_create_modify_header_hws(struct mlx5hws_action *action,
MLX5_GET(set_action_in, pattern[i].data, action_type);
} else {
/* Multiple modify actions require a pattern */
- if (unlikely(action[i].modify_header.nope_locations)) {
+ if (unlikely(action[i].modify_header.nop_locations)) {
size_t pattern_sz;
pattern_sz = action[i].modify_header.num_of_actions *
@@ -1575,17 +1584,15 @@ hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
return definer;
}
-static struct mlx5hws_matcher_action_ste *
+static struct mlx5hws_range_action_table *
hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
struct mlx5hws_definer *definer,
u32 miss_ft_id)
{
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
- struct mlx5hws_action_default_stc *default_stc;
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_pool_attr pool_attr = {0};
struct mlx5hws_pool *ste_pool, *stc_pool;
- struct mlx5hws_pool_chunk *ste;
u32 *rtc_0_id, *rtc_1_id;
u32 obj_id;
int ret;
@@ -1604,7 +1611,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
pool_attr.alloc_log_sz = 1;
table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
if (!table_ste->pool) {
@@ -1616,8 +1622,6 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_0_id = &table_ste->rtc_0_id;
rtc_1_id = &table_ste->rtc_1_id;
ste_pool = table_ste->pool;
- ste = &table_ste->ste;
- ste->order = 1;
rtc_attr.log_size = 0;
rtc_attr.log_depth = 0;
@@ -1629,18 +1633,16 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_attr.fw_gen_wqe = true;
rtc_attr.is_scnd_range = true;
- obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_id(ste_pool);
rtc_attr.pd = ctx->pd_num;
rtc_attr.ste_base = obj_id;
- rtc_attr.ste_offset = ste->offset;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
/* STC is a single resource (obj_id), use any STC for the ID */
stc_pool = ctx->stc_pool;
- default_stc = ctx->common_res.default_stc;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_id(stc_pool);
rtc_attr.stc_base = obj_id;
ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
@@ -1650,11 +1652,11 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
}
/* Create mirror RTC */
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
rtc_attr.ste_base = obj_id;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_mirror_id(stc_pool);
rtc_attr.stc_base = obj_id;
ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
@@ -1677,9 +1679,9 @@ free_ste:
return NULL;
}
-static void
-hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
- struct mlx5hws_matcher_action_ste *table_ste)
+static void hws_action_destroy_dest_match_range_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste)
{
mutex_lock(&ctx->ctrl_lock);
@@ -1691,12 +1693,11 @@ hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
mutex_unlock(&ctx->ctrl_lock);
}
-static int
-hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
- struct mlx5hws_matcher_action_ste *table_ste,
- struct mlx5hws_action *hit_ft_action,
- struct mlx5hws_definer *range_definer,
- u32 min, u32 max)
+static int hws_action_create_dest_match_range_fill_table(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_range_action_table *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer, u32 min, u32 max)
{
struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
@@ -1792,7 +1793,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
u32 min, u32 max, u32 flags)
{
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_action *hit_ft_action;
struct mlx5hws_definer *definer;
struct mlx5hws_action *action;
@@ -1837,7 +1838,6 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- stc_attr.ste_table.ste = table_ste->ste;
stc_attr.ste_table.ste_pool = table_ste->pool;
stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
@@ -2110,21 +2110,23 @@ static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
u32 arg_idx,
u8 *arg_data,
u16 num_of_actions,
- u32 nope_locations)
+ u32 nop_locations)
{
u8 *new_arg_data = NULL;
int i, j;
- if (unlikely(nope_locations)) {
+ if (unlikely(nop_locations)) {
new_arg_data = kcalloc(num_of_actions,
MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
if (unlikely(!new_arg_data))
return;
- for (i = 0, j = 0; i < num_of_actions; i++, j++) {
- memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
- if (BIT(i) & nope_locations)
+ for (i = 0, j = 0; j < num_of_actions; i++, j++) {
+ if (BIT(i) & nop_locations)
j++;
+ memcpy(&new_arg_data[j * MLX5HWS_MODIFY_ACTION_SIZE],
+ &arg_data[i * MLX5HWS_MODIFY_ACTION_SIZE],
+ MLX5HWS_MODIFY_ACTION_SIZE);
}
}
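
The rewritten hws_action_modify_write() copy loop above fixes several defects in the old version: the destination was indexed in bytes rather than whole actions (&new_arg_data[j]), the source pointer never advanced past the first action, and the NOP skip was applied after the copy instead of before it. A standalone, compilable illustration of the fixed logic (plain C, not driver code; ACTION_SIZE stands in for MLX5HWS_MODIFY_ACTION_SIZE):

    /* Source action i lands in destination slot j; a set bit in
     * nop_locations leaves one zeroed slot -- an inserted NOP --
     * before it. calloc() pre-zeroes the slots, as kcalloc() does
     * in the driver.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define ACTION_SIZE 8

    int main(void)
    {
        const unsigned char src[3 * ACTION_SIZE] = {
            [0 * ACTION_SIZE] = 0xAA,
            [1 * ACTION_SIZE] = 0xBB,
            [2 * ACTION_SIZE] = 0xCC,
        };
        unsigned int nop_locations = 1u << 1; /* NOP before action 1 */
        unsigned int num_of_actions = 4;      /* 3 actions + 1 NOP */
        unsigned char *dst = calloc(num_of_actions, ACTION_SIZE);
        unsigned int i, j;

        if (!dst)
            return 1;

        for (i = 0, j = 0; j < num_of_actions; i++, j++) {
            if ((1u << i) & nop_locations)
                j++; /* slot j stays zeroed: the NOP */
            memcpy(&dst[j * ACTION_SIZE],
                   &src[i * ACTION_SIZE], ACTION_SIZE);
        }

        for (j = 0; j < num_of_actions; j++)
            printf("slot %u: 0x%02x\n", j, dst[j * ACTION_SIZE]);
        /* prints: aa, 00, bb, cc */
        free(dst);
        return 0;
    }
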
@@ -2220,6 +2222,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
struct mlx5hws_action *action;
u32 arg_sz, arg_idx;
u8 *single_action;
+ u8 max_actions;
__be32 stc_idx;
rule_action = &apply->rule_action[setter->idx_double];
@@ -2247,21 +2250,23 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
*(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
- } else {
- /* Argument offset multiple with number of args per these actions */
- arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
- arg_idx = rule_action->modify_header.offset * arg_sz;
-
- apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
-
- if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
- apply->require_dep = 1;
- hws_action_modify_write(apply->queue,
- action->modify_header.arg_id + arg_idx,
- rule_action->modify_header.data,
- action->modify_header.num_of_actions,
- action->modify_header.nope_locations);
- }
+ return;
+ }
+
+ /* Argument offset is multiplied by the number of args per these actions */

+ max_actions = action->modify_header.max_num_of_actions;
+ arg_sz = mlx5hws_arg_get_arg_size(max_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nop_locations);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
index 64b76075f7f8..55a079fdd08f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -118,6 +118,12 @@ struct mlx5hws_action_template {
u8 only_term;
};
+struct mlx5hws_range_action_table {
+ struct mlx5hws_pool *pool;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+};
+
struct mlx5hws_action {
u8 type;
u8 flags;
@@ -130,7 +136,7 @@ struct mlx5hws_action {
u32 pat_id;
u32 arg_id;
__be64 single_action;
- u32 nope_locations;
+ u32 nop_locations;
u8 num_of_patterns;
u8 single_action_type;
u8 num_of_actions;
@@ -186,7 +192,7 @@ struct mlx5hws_action {
size_t size;
} remove_header;
struct {
- struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_range_action_table *table_ste;
struct mlx5hws_action *hit_ft_action;
struct mlx5hws_definer *definer;
} range;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
new file mode 100644
index 000000000000..5766a9c82f96
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include "internal.h"
+
+static const char *
+hws_pool_opt_to_str(enum mlx5hws_pool_optimize opt)
+{
+ switch (opt) {
+ case MLX5HWS_POOL_OPTIMIZE_NONE:
+ return "rx-and-tx";
+ case MLX5HWS_POOL_OPTIMIZE_ORIG:
+ return "rx-only";
+ case MLX5HWS_POOL_OPTIMIZE_MIRROR:
+ return "tx-only";
+ default:
+ return "unknown";
+ }
+}
+
+static int
+hws_action_ste_table_create_pool(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ struct mlx5hws_pool_attr pool_attr = { 0 };
+
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.flags = MLX5HWS_POOL_FLAG_BUDDY;
+ pool_attr.opt_type = opt;
+ pool_attr.alloc_log_sz = log_sz;
+
+ action_tbl->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_tbl->pool) {
+ mlx5hws_err(ctx, "Failed to allocate STE pool\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hws_action_ste_table_create_single_rtc(
+ struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz, bool tx)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = { 0 };
+ u32 *rtc_id;
+
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* Action STEs use the default always hit definer. */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+
+ if (tx) {
+ rtc_attr.table_type = FS_FT_FDB_TX;
+ rtc_attr.ste_base =
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+ rtc_attr.stc_base =
+ mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_1_id;
+ } else {
+ rtc_attr.table_type = FS_FT_FDB_RX;
+ rtc_attr.ste_base = mlx5hws_pool_get_base_id(action_tbl->pool);
+ rtc_attr.stc_base = mlx5hws_pool_get_base_id(ctx->stc_pool);
+ rtc_attr.log_size =
+ opt == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_sz;
+ rtc_id = &action_tbl->rtc_0_id;
+ }
+
+ return mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_id);
+}
+
+static int
+hws_action_ste_table_create_rtcs(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl,
+ enum mlx5hws_pool_optimize opt, size_t log_sz)
+{
+ int err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, false);
+ if (err)
+ return err;
+
+ err = hws_action_ste_table_create_single_rtc(ctx, action_tbl, opt,
+ log_sz, true);
+ if (err) {
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, action_tbl->rtc_0_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+hws_action_ste_table_destroy_rtcs(struct mlx5hws_action_ste_table *action_tbl)
+{
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(action_tbl->pool->ctx->mdev,
+ action_tbl->rtc_0_id);
+}
+
+static int
+hws_action_ste_table_create_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = { 0 };
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste_pool = action_tbl->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ return mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+}
+
+static struct mlx5hws_action_ste_table *
+hws_action_ste_table_alloc(struct mlx5hws_action_ste_pool_element *parent_elem)
+{
+ enum mlx5hws_pool_optimize opt = parent_elem->opt;
+ struct mlx5hws_context *ctx = parent_elem->ctx;
+ struct mlx5hws_action_ste_table *action_tbl;
+ size_t log_sz;
+ int err;
+
+ log_sz = min(parent_elem->log_sz ?
+ parent_elem->log_sz +
+ MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ :
+ MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ,
+ MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ);
+
+ action_tbl = kzalloc(sizeof(*action_tbl), GFP_KERNEL);
+ if (!action_tbl)
+ return ERR_PTR(-ENOMEM);
+
+ err = hws_action_ste_table_create_pool(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto free_tbl;
+
+ err = hws_action_ste_table_create_rtcs(ctx, action_tbl, opt, log_sz);
+ if (err)
+ goto destroy_pool;
+
+ err = hws_action_ste_table_create_stc(ctx, action_tbl);
+ if (err)
+ goto destroy_rtcs;
+
+ action_tbl->parent_elem = parent_elem;
+ INIT_LIST_HEAD(&action_tbl->list_node);
+ action_tbl->last_used = jiffies;
+ list_add(&action_tbl->list_node, &parent_elem->available);
+ parent_elem->log_sz = log_sz;
+
+ mlx5hws_dbg(ctx,
+ "Allocated %s action STE table log_sz %zu; STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(opt), log_sz,
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ return action_tbl;
+
+destroy_rtcs:
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+destroy_pool:
+ mlx5hws_pool_destroy(action_tbl->pool);
+free_tbl:
+ kfree(action_tbl);
+
+ return ERR_PTR(err);
+}
+
+static void
+hws_action_ste_table_destroy(struct mlx5hws_action_ste_table *action_tbl)
+{
+ struct mlx5hws_context *ctx = action_tbl->parent_elem->ctx;
+
+ mlx5hws_dbg(ctx,
+ "Destroying %s action STE table: STEs (%d, %d); RTCs (%d, %d); STC %d\n",
+ hws_pool_opt_to_str(action_tbl->parent_elem->opt),
+ mlx5hws_pool_get_base_id(action_tbl->pool),
+ mlx5hws_pool_get_base_mirror_id(action_tbl->pool),
+ action_tbl->rtc_0_id, action_tbl->rtc_1_id,
+ action_tbl->stc.offset);
+
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action_tbl->stc);
+ hws_action_ste_table_destroy_rtcs(action_tbl);
+ mlx5hws_pool_destroy(action_tbl->pool);
+
+ list_del(&action_tbl->list_node);
+ kfree(action_tbl);
+}
+
+static int
+hws_action_ste_pool_element_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool_element *elem,
+ enum mlx5hws_pool_optimize opt)
+{
+ elem->ctx = ctx;
+ elem->opt = opt;
+ INIT_LIST_HEAD(&elem->available);
+ INIT_LIST_HEAD(&elem->full);
+
+ return 0;
+}
+
+static void hws_action_ste_pool_element_destroy(
+ struct mlx5hws_action_ste_pool_element *elem)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ /* This should be empty, but attempt to free its elements anyway. */
+ list_for_each_entry_safe(action_tbl, p, &elem->full, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static int hws_action_ste_pool_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ enum mlx5hws_pool_optimize opt;
+ int err;
+
+ mutex_init(&pool->lock);
+
+ /* Rules which are added for both RX and TX must use the same action STE
+ * indices for both. If we were to use a single table, then RX-only and
+ * TX-only rules would waste the unused entries. Thus, we use separate
+ * table sets for the three cases.
+ */
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ err = hws_action_ste_pool_element_init(ctx, &pool->elems[opt],
+ opt);
+ if (err)
+ goto destroy_elems;
+ pool->elems[opt].parent_pool = pool;
+ }
+
+ return 0;
+
+destroy_elems:
+ while (opt-- > MLX5HWS_POOL_OPTIMIZE_NONE)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+
+ return err;
+}
+
+static void hws_action_ste_pool_destroy(struct mlx5hws_action_ste_pool *pool)
+{
+ int opt;
+
+ for (opt = MLX5HWS_POOL_OPTIMIZE_MAX - 1;
+ opt >= MLX5HWS_POOL_OPTIMIZE_NONE; opt--)
+ hws_action_ste_pool_element_destroy(&pool->elems[opt]);
+}
+
+static void hws_action_ste_pool_element_collect_stale(
+ struct mlx5hws_action_ste_pool_element *elem, struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+ unsigned long expire_time, now;
+
+ expire_time = secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS);
+ now = jiffies;
+
+ list_for_each_entry_safe(action_tbl, p, &elem->available, list_node) {
+ if (mlx5hws_pool_full(action_tbl->pool) &&
+ time_before(action_tbl->last_used + expire_time, now))
+ list_move(&action_tbl->list_node, cleanup);
+ }
+}
+
+static void hws_action_ste_table_cleanup_list(struct list_head *cleanup)
+{
+ struct mlx5hws_action_ste_table *action_tbl, *p;
+
+ list_for_each_entry_safe(action_tbl, p, cleanup, list_node)
+ hws_action_ste_table_destroy(action_tbl);
+}
+
+static void hws_action_ste_pool_cleanup(struct work_struct *work)
+{
+ enum mlx5hws_pool_optimize opt;
+ struct mlx5hws_context *ctx;
+ LIST_HEAD(cleanup);
+ int i;
+
+ ctx = container_of(work, struct mlx5hws_context,
+ action_ste_cleanup.work);
+
+ for (i = 0; i < ctx->queues; i++) {
+ struct mlx5hws_action_ste_pool *p = &ctx->action_ste_pool[i];
+
+ mutex_lock(&p->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE;
+ opt < MLX5HWS_POOL_OPTIMIZE_MAX; opt++)
+ hws_action_ste_pool_element_collect_stale(
+ &p->elems[opt], &cleanup);
+ mutex_unlock(&p->lock);
+ }
+
+ hws_action_ste_table_cleanup_list(&cleanup);
+
+ schedule_delayed_work(&ctx->action_ste_cleanup,
+ secs_to_jiffies(
+ MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+}
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_ste_pool *pool;
+ size_t queues = ctx->queues;
+ int i, err;
+
+ pool = kcalloc(queues, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return -ENOMEM;
+
+ for (i = 0; i < queues; i++) {
+ err = hws_action_ste_pool_init(ctx, &pool[i]);
+ if (err)
+ goto free_pool;
+ }
+
+ ctx->action_ste_pool = pool;
+
+ INIT_DELAYED_WORK(&ctx->action_ste_cleanup,
+ hws_action_ste_pool_cleanup);
+ schedule_delayed_work(
+ &ctx->action_ste_cleanup,
+ secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS));
+
+ return 0;
+
+free_pool:
+ while (i--)
+ hws_action_ste_pool_destroy(&pool[i]);
+ kfree(pool);
+
+ return err;
+}
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx)
+{
+ size_t queues = ctx->queues;
+ int i;
+
+ cancel_delayed_work_sync(&ctx->action_ste_cleanup);
+
+ for (i = 0; i < queues; i++)
+ hws_action_ste_pool_destroy(&ctx->action_ste_pool[i]);
+
+ kfree(ctx->action_ste_pool);
+}
+
+static struct mlx5hws_action_ste_pool_element *
+hws_action_ste_choose_elem(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx)
+{
+ if (skip_rx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_MIRROR];
+
+ if (skip_tx)
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_ORIG];
+
+ return &pool->elems[MLX5HWS_POOL_OPTIMIZE_NONE];
+}
+
+static int
+hws_action_ste_table_chunk_alloc(struct mlx5hws_action_ste_table *action_tbl,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ int err;
+
+ err = mlx5hws_pool_chunk_alloc(action_tbl->pool, &chunk->ste);
+ if (err)
+ return err;
+
+ chunk->action_tbl = action_tbl;
+ action_tbl->last_used = jiffies;
+
+ return 0;
+}
+
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mlx5hws_action_ste_pool_element *elem;
+ struct mlx5hws_action_ste_table *action_tbl;
+ bool found;
+ int err;
+
+ if (skip_rx && skip_tx)
+ return -EINVAL;
+
+ mutex_lock(&pool->lock);
+
+ elem = hws_action_ste_choose_elem(pool, skip_rx, skip_tx);
+
+ mlx5hws_dbg(elem->ctx,
+ "Allocating action STEs skip_rx %d skip_tx %d order %d\n",
+ skip_rx, skip_tx, chunk->ste.order);
+
+ found = false;
+ list_for_each_entry(action_tbl, &elem->available, list_node) {
+ if (!hws_action_ste_table_chunk_alloc(action_tbl, chunk)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ action_tbl = hws_action_ste_table_alloc(elem);
+ if (IS_ERR(action_tbl)) {
+ err = PTR_ERR(action_tbl);
+ goto out;
+ }
+
+ err = hws_action_ste_table_chunk_alloc(action_tbl, chunk);
+ if (err)
+ goto out;
+ }
+
+ if (mlx5hws_pool_empty(action_tbl->pool))
+ list_move(&action_tbl->list_node, &elem->full);
+
+ err = 0;
+
+out:
+ mutex_unlock(&pool->lock);
+
+ return err;
+}
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk)
+{
+ struct mutex *lock = &chunk->action_tbl->parent_elem->parent_pool->lock;
+
+ mlx5hws_dbg(chunk->action_tbl->pool->ctx,
+ "Freeing action STEs offset %d order %d\n",
+ chunk->ste.offset, chunk->ste.order);
+
+ mutex_lock(lock);
+ mlx5hws_pool_chunk_free(chunk->action_tbl->pool, &chunk->ste);
+ chunk->action_tbl->last_used = jiffies;
+ list_move(&chunk->action_tbl->list_node,
+ &chunk->action_tbl->parent_elem->available);
+ mutex_unlock(lock);
+}
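
action_ste_pool.c introduces a per-queue cache of action-STE tables in three variants (rx-and-tx, rx-only, tx-only) with a delayed-work garbage collector. The GC predicate is worth spelling out: a table is reclaimed only when all of its chunks have been returned (its pool is full again) and it has sat unused past the expiry window. A sketch of that rule, using the helpers named in the file:

    /* Sketch: the eviction rule applied by
     * hws_action_ste_pool_element_collect_stale() above.
     */
    static bool should_evict(struct mlx5hws_action_ste_table *tbl)
    {
        unsigned long expire =
            secs_to_jiffies(MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS);

        return mlx5hws_pool_full(tbl->pool) &&
               time_before(tbl->last_used + expire, jiffies);
    }
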
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
new file mode 100644
index 000000000000..a8ba97359e31
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action_ste_pool.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef ACTION_STE_POOL_H_
+#define ACTION_STE_POOL_H_
+
+#define MLX5HWS_ACTION_STE_TABLE_INIT_LOG_SZ 10
+#define MLX5HWS_ACTION_STE_TABLE_STEP_LOG_SZ 1
+#define MLX5HWS_ACTION_STE_TABLE_MAX_LOG_SZ 20
+
+#define MLX5HWS_ACTION_STE_POOL_CLEANUP_SECONDS 300
+#define MLX5HWS_ACTION_STE_POOL_EXPIRE_SECONDS 300
+
+struct mlx5hws_action_ste_pool_element;
+
+struct mlx5hws_action_ste_table {
+ struct mlx5hws_action_ste_pool_element *parent_elem;
+ /* Wraps the RTC and STE range for this given action. */
+ struct mlx5hws_pool *pool;
+ /* Match STEs use this STC to jump to this pool's RTC. */
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct list_head list_node;
+ unsigned long last_used;
+};
+
+struct mlx5hws_action_ste_pool_element {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_action_ste_pool *parent_pool;
+ size_t log_sz; /* Size of the largest table so far. */
+ enum mlx5hws_pool_optimize opt;
+ struct list_head available;
+ struct list_head full;
+};
+
+/* Central repository of action STEs. The context contains one of these pools
+ * per queue.
+ */
+struct mlx5hws_action_ste_pool {
+ /* Protects the entire pool. We have one pool per queue and only one
+ * operation can be active per rule at a given time. Thus this lock
+ * protects solely against concurrent garbage collection and we expect
+ * very little contention.
+ */
+ struct mutex lock;
+ struct mlx5hws_action_ste_pool_element elems[MLX5HWS_POOL_OPTIMIZE_MAX];
+};
+
+/* A chunk of STEs and the table it was allocated from. Used by rules. */
+struct mlx5hws_action_ste_chunk {
+ struct mlx5hws_action_ste_table *action_tbl;
+ struct mlx5hws_pool_chunk ste;
+};
+
+int mlx5hws_action_ste_pool_init(struct mlx5hws_context *ctx);
+
+void mlx5hws_action_ste_pool_uninit(struct mlx5hws_context *ctx);
+
+/* Callers are expected to fill chunk->ste.order. On success, this function
+ * populates chunk->action_tbl and chunk->ste.offset.
+ */
+int mlx5hws_action_ste_chunk_alloc(struct mlx5hws_action_ste_pool *pool,
+ bool skip_rx, bool skip_tx,
+ struct mlx5hws_action_ste_chunk *chunk);
+
+void mlx5hws_action_ste_chunk_free(struct mlx5hws_action_ste_chunk *chunk);
+
+#endif /* ACTION_STE_POOL_H_ */
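
Putting the header's contract together, a caller's life cycle looks like this sketch: fill ste.order, let the pool pick or grow a table, then return the chunk when the rule dies.

    /* Sketch of the documented call sequence. */
    struct mlx5hws_action_ste_chunk chunk = {};
    int err;

    chunk.ste.order = 2; /* request 4 contiguous action STEs */
    err = mlx5hws_action_ste_chunk_alloc(pool, false, false, &chunk);
    if (err)
        return err;
    /* ... program actions at chunk.ste.offset via chunk.action_tbl ... */
    mlx5hws_action_ste_chunk_free(&chunk);
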
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 19dce1ba512d..9e057f808ea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -46,10 +46,14 @@ static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
}
}
-static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 priority,
- u8 size_log)
+ u8 size_log,
+ struct mlx5hws_matcher_attr *attr)
{
+ struct mlx5hws_bwc_matcher *first_matcher =
+ bwc_matcher->complex_first_bwc_matcher;
+
memset(attr, 0, sizeof(*attr));
attr->priority = priority;
@@ -61,6 +65,9 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
attr->rule.num_log = size_log;
attr->resizable = true;
attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+
+ attr->isolated_matcher_end_ft_id =
+ first_matcher ? first_matcher->matcher->end_ft_id : 0;
}
int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
@@ -83,20 +90,27 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
for (i = 0; i < bwc_queues; i++)
INIT_LIST_HEAD(&bwc_matcher->rules[i]);
- hws_bwc_matcher_init_attr(&attr,
+ hws_bwc_matcher_init_attr(bwc_matcher,
priority,
- MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+ MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG,
+ &attr);
bwc_matcher->priority = priority;
bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+ bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+ bwc_matcher->at = kcalloc(bwc_matcher->size_of_at_array,
+ sizeof(*bwc_matcher->at), GFP_KERNEL);
+ if (!bwc_matcher->at)
+ goto free_bwc_matcher_rules;
+
/* create dummy action template */
bwc_matcher->at[0] =
mlx5hws_action_template_create(action_types ?
action_types : init_action_types);
if (!bwc_matcher->at[0]) {
mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
- goto free_bwc_matcher_rules;
+ goto free_bwc_matcher_at_array;
}
bwc_matcher->num_of_at = 1;
@@ -126,6 +140,8 @@ free_mt:
mlx5hws_match_template_destroy(bwc_matcher->mt);
free_at:
mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_at_array:
+ kfree(bwc_matcher->at);
free_bwc_matcher_rules:
kfree(bwc_matcher->rules);
err:
@@ -153,6 +169,7 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
return NULL;
atomic_set(&bwc_matcher->num_of_rules, 0);
+ atomic_set(&bwc_matcher->rehash_required, false);
/* Check if the required match params can be all matched
* in single STE, otherwise complex matcher is needed.
@@ -192,6 +209,7 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
for (i = 0; i < bwc_matcher->num_of_at; i++)
mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+ kfree(bwc_matcher->at);
mlx5hws_match_template_destroy(bwc_matcher->mt);
kfree(bwc_matcher->rules);
@@ -208,16 +226,19 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
"BWC matcher destroy: matcher still has %d rules\n",
num_of_rules);
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ if (bwc_matcher->complex)
+ mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
+ else
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
kfree(bwc_matcher);
return 0;
}
-static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
- u16 queue_id,
- u32 *pending_rules,
- bool drain)
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
{
unsigned long timeout = jiffies +
secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
@@ -320,16 +341,12 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- atomic_inc(&bwc_matcher->num_of_rules);
bwc_rule->bwc_queue_idx = idx;
list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
}
static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
-
- atomic_dec(&bwc_matcher->num_of_rules);
list_del_init(&bwc_rule->list_node);
}
@@ -352,7 +369,8 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
if (unlikely(ret))
return ret;
@@ -382,6 +400,7 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
mutex_lock(queue_lock);
ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+ atomic_dec(&bwc_matcher->num_of_rules);
hws_bwc_rule_list_remove(bwc_rule);
mutex_unlock(queue_lock);
@@ -391,9 +410,13 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
{
- int ret;
+ bool is_complex = !!bwc_rule->bwc_matcher->complex;
+ int ret = 0;
- ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (is_complex)
+ ret = mlx5hws_bwc_rule_destroy_complex(bwc_rule);
+ else
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
mlx5hws_bwc_rule_free(bwc_rule);
return ret;
@@ -433,9 +456,8 @@ hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
-
- return ret;
+ return mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
}
static int
@@ -456,7 +478,8 @@ hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret))
return ret;
- ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ ret = mlx5hws_bwc_queue_poll(ctx, rule_attr->queue_id,
+ &expected_completions, true);
if (unlikely(ret))
mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
@@ -469,21 +492,9 @@ hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
/* check the match RTC size */
- if ((bwc_matcher->size_log +
- MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
- MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
- (caps->ste_alloc_log_max - 1))
- return true;
-
- /* check the action RTC size */
- if ((bwc_matcher->size_log +
- MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP +
- ilog2(roundup_pow_of_two(bwc_matcher->matcher->action_ste.max_stes)) +
- MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT) >
- (caps->ste_alloc_log_max - 1))
- return true;
-
- return false;
+ return (bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
+ (caps->ste_alloc_log_max - 1);
}
static bool
@@ -520,6 +531,23 @@ hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_rule_action rule_actions[])
{
enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+ void *p;
+
+ if (unlikely(bwc_matcher->num_of_at >= bwc_matcher->size_of_at_array)) {
+ if (bwc_matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+ bwc_matcher->size_of_at_array *= 2;
+ p = krealloc(bwc_matcher->at,
+ bwc_matcher->size_of_at_array *
+ sizeof(*bwc_matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ bwc_matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ bwc_matcher->at = p;
+ }
hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
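
The new at-array bookkeeping above grows geometrically and rolls the recorded capacity back if krealloc() fails, so bwc_matcher->at and size_of_at_array can never disagree. A standalone illustration of the same pattern in plain C (realloc() does not zero the tail the way the driver's krealloc(__GFP_ZERO) call is relied on to, so the sketch does it by hand):

    #include <stdlib.h>
    #include <string.h>

    struct at_array {
        void **at;
        size_t size; /* capacity, in elements */
        size_t num;  /* elements in use */
    };

    static int at_array_reserve_one(struct at_array *a, size_t max)
    {
        void *p;

        if (a->num < a->size)
            return 0;
        if (a->size >= max)
            return -1; /* -ENOMEM in the driver */

        a->size *= 2;
        p = realloc(a->at, a->size * sizeof(*a->at));
        if (!p) {
            a->size /= 2; /* roll back; old buffer is intact */
            return -1;
        }
        /* Zero the extension, which krealloc(__GFP_ZERO) is
         * expected to do for the driver.
         */
        memset((void **)p + a->size / 2, 0,
               (a->size / 2) * sizeof(*a->at));
        a->at = p;
        return 0;
    }
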
@@ -582,100 +610,79 @@ hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
+ bool move_error = false, poll_error = false, drain_error = false;
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
- struct mlx5hws_bwc_rule **bwc_rules;
struct mlx5hws_rule_attr rule_attr;
- u32 *pending_rules;
- int i, j, ret = 0;
- bool all_done;
- u16 burst_th;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5hws_send_engine *queue;
+ struct list_head *rules_list;
+ u32 pending_rules;
+ int i, ret = 0;
mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
- pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
- if (!pending_rules)
- return -ENOMEM;
-
- bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
- if (!bwc_rules) {
- ret = -ENOMEM;
- goto free_pending_rules;
- }
-
for (i = 0; i < bwc_queues; i++) {
if (list_empty(&bwc_matcher->rules[i]))
- bwc_rules[i] = NULL;
- else
- bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
- struct mlx5hws_bwc_rule,
- list_node);
- }
-
- do {
- all_done = true;
+ continue;
- for (i = 0; i < bwc_queues; i++) {
- rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
- burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+ pending_rules = 0;
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ rules_list = &bwc_matcher->rules[i];
- for (j = 0; j < burst_th && bwc_rules[i]; j++) {
- rule_attr.burst = !!((j + 1) % burst_th);
- ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
- bwc_rules[i]->rule,
- &rule_attr);
- if (unlikely(ret)) {
- mlx5hws_err(ctx,
- "Moving BWC rule failed during rehash (%d)\n",
- ret);
- goto free_bwc_rules;
- }
-
- all_done = false;
- pending_rules[i]++;
- bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
- &bwc_matcher->rules[i]) ?
- NULL : list_next_entry(bwc_rules[i], list_node);
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ bwc_rule->rule,
+ &rule_attr);
+ if (unlikely(ret && !move_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = true;
+ }
- ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
- &pending_rules[i], false);
- if (unlikely(ret)) {
- mlx5hws_err(ctx,
- "Moving BWC rule failed during rehash (%d)\n",
- ret);
- goto free_bwc_rules;
- }
+ pending_rules++;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ false);
+ if (unlikely(ret && !poll_error)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = true;
}
}
- } while (!all_done);
- /* drain all the bwc queues */
- for (i = 0; i < bwc_queues; i++) {
- if (pending_rules[i]) {
- u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
-
- mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
- ret = hws_bwc_queue_poll(ctx, queue_id,
- &pending_rules[i], true);
- if (unlikely(ret)) {
+ if (pending_rules) {
+ queue = &ctx->send_queue[rule_attr.queue_id];
+ mlx5hws_send_engine_flush_queue(queue);
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &pending_rules,
+ true);
+ if (unlikely(ret && !drain_error)) {
mlx5hws_err(ctx,
- "Moving BWC rule failed during rehash (%d)\n", ret);
- goto free_bwc_rules;
+ "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
+ ret);
+ drain_error = true;
}
}
}
-free_bwc_rules:
- kfree(bwc_rules);
-free_pending_rules:
- kfree(pending_rules);
+ if (move_error || poll_error || drain_error)
+ ret = -EINVAL;
return ret;
}
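The rewritten loop follows a simple post/poll/drain pattern: every move bumps a per-queue pending counter, a non-draining poll opportunistically reaps completions, and a final flush-and-drain poll waits out the remainder. A toy model with a fake completion source in place of the send engine:

#include <stdio.h>

/* Fake completion source standing in for the send engine: each call
 * reaps up to `budget` completions, looping only when draining.
 */
static void fake_poll(unsigned int *pending, unsigned int budget, int drain)
{
	do {
		unsigned int n = *pending < budget ? *pending : budget;

		*pending -= n;
	} while (drain && *pending);
}

int main(void)
{
	unsigned int pending = 0;

	for (int i = 0; i < 4; i++) {
		pending += 3;		   /* a burst of rule moves posted */
		fake_poll(&pending, 2, 0); /* opportunistic reap */
	}
	fake_poll(&pending, 2, 1);	   /* final flush-and-drain */
	printf("pending = %u\n", pending); /* 0 */
	return 0;
}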
static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- return hws_bwc_matcher_move_all_simple(bwc_matcher);
+ if (!bwc_matcher->complex)
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+
+ return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
}
static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
@@ -686,9 +693,10 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
struct mlx5hws_matcher *new_matcher;
int ret;
- hws_bwc_matcher_init_attr(&matcher_attr,
+ hws_bwc_matcher_init_attr(bwc_matcher,
bwc_matcher->priority,
- bwc_matcher->size_log);
+ bwc_matcher->size_log,
+ &matcher_attr);
old_matcher = bwc_matcher->matcher;
new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
@@ -708,15 +716,18 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
}
ret = hws_bwc_matcher_move_all(bwc_matcher);
- if (ret) {
- mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
- return -ENOMEM;
- }
+ if (ret)
+ mlx5hws_err(ctx, "Rehash error: moving rules failed, attempting to remove the old matcher\n");
+
+ /* Error during rehash can't be rolled back.
+ * The best option here is to allow the rehash to complete and remove
+ * the old matcher - can't leave the matcher in the 'in_resize' state.
+ */
bwc_matcher->matcher = new_matcher;
mlx5hws_matcher_destroy(old_matcher);
- return 0;
+ return ret;
}
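The key point of the new error handling is that the matcher swap always commits. A hypothetical miniature of this commit-anyway sequence (names and the simulated -EINVAL are illustrative):

#include <stdlib.h>

struct matcher { int id; };

static int move_all(int simulate_failure)
{
	return simulate_failure ? -22 /* -EINVAL */ : 0;
}

static int rehash(struct matcher **current, struct matcher *new_matcher,
		  int simulate_failure)
{
	struct matcher *old = *current;
	int ret = move_all(simulate_failure);

	/* Success or not, commit the swap and retire the old matcher;
	 * the failure is only propagated through the return value.
	 */
	*current = new_matcher;
	free(old);
	return ret;
}

int main(void)
{
	struct matcher *cur = calloc(1, sizeof(*cur));
	struct matcher *next = calloc(1, sizeof(*next));
	int ret;

	ret = rehash(&cur, next, 1); /* move fails, swap still happens */
	free(cur);
	return ret ? 1 : 0;
}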
static int
@@ -733,9 +744,9 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
/* It is possible that other rule has already performed rehash.
* Need to check again if we really need rehash.
- * If the reason for rehash was size, but not any more - skip rehash.
*/
- if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher,
+ if (!atomic_read(&bwc_matcher->rehash_required) &&
+ !hws_bwc_matcher_rehash_size_needed(bwc_matcher,
atomic_read(&bwc_matcher->num_of_rules)))
return 0;
@@ -746,6 +757,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* - destroy the old matcher
*/
+ atomic_set(&bwc_matcher->rehash_required, false);
+
ret = hws_bwc_matcher_extend_size(bwc_matcher);
if (ret)
return ret;
@@ -753,17 +766,51 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
return hws_bwc_matcher_move(bwc_matcher);
}
-static int
-hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+static int hws_bwc_rule_get_at_idx(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
{
- /* Rehash by action template doesn't require any additional checking.
- * The bwc_matcher already contains the new action template.
- * Just do the usual rehash:
- * - create new matcher
- * - move all the rules to the new matcher
- * - destroy the old matcher
- */
- return hws_bwc_matcher_move(bwc_matcher);
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+
+ /* check if a matching action template already exists */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx >= 0))
+ return at_idx;
+
+ /* we need to extend BWC matcher action templates array */
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ /* check again - perhaps other thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (at_idx >= 0)
+ goto out;
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed extending AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "BWC rule: failed attaching new AT (%d)", ret);
+ at_idx = -EINVAL;
+ goto out;
+ }
+
+out:
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ return at_idx;
}
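The helper above is a classic lock-promotion pattern: drop the fine-grained queue lock, take the global lock, and re-check before doing the expensive extension, since another thread may have done it already. A compact pthread sketch of the same dance, with invented names:

#include <pthread.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t all_queues_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_at_count;

/* Hypothetical lookup: "found" once somebody has extended the array. */
static int find_at(void)
{
	return shared_at_count > 0 ? 0 : -1;
}

/* Called with queue_lock held; returns with queue_lock held. */
static int get_at_idx(void)
{
	int idx = find_at();

	if (idx >= 0)
		return idx;	/* fast path, no lock promotion needed */

	pthread_mutex_unlock(&queue_lock);
	pthread_mutex_lock(&all_queues_lock);

	/* Re-check: another thread may have extended meanwhile. */
	idx = find_at();
	if (idx < 0) {
		shared_at_count++;	/* the "extend" step */
		idx = 0;
	}

	pthread_mutex_unlock(&all_queues_lock);
	pthread_mutex_lock(&queue_lock);
	return idx;
}

int main(void)
{
	int idx;

	pthread_mutex_lock(&queue_lock);
	idx = get_at_idx();
	pthread_mutex_unlock(&queue_lock);
	return idx;	/* 0 */
}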
int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
@@ -786,50 +833,16 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
mutex_lock(queue_lock);
- /* check if rehash needed due to missing action template */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, bwc_queue_idx);
if (unlikely(at_idx < 0)) {
- /* we need to extend BWC matcher action templates array */
mutex_unlock(queue_lock);
- hws_bwc_lock_all_queues(ctx);
-
- ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
- if (unlikely(ret)) {
- hws_bwc_unlock_all_queues(ctx);
- return ret;
- }
-
- /* action templates array was extended, we need the last idx */
- at_idx = bwc_matcher->num_of_at - 1;
-
- ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
- bwc_matcher->at[at_idx]);
- if (unlikely(ret)) {
- /* Action template attach failed, possibly due to
- * requiring more action STEs.
- * Need to attempt creating new matcher with all
- * the action templates, including the new one.
- */
- ret = hws_bwc_matcher_rehash_at(bwc_matcher);
- if (unlikely(ret)) {
- mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
- bwc_matcher->at[at_idx] = NULL;
- bwc_matcher->num_of_at--;
-
- hws_bwc_unlock_all_queues(ctx);
-
- mlx5hws_err(ctx,
- "BWC rule insertion: rehash AT failed (%d)\n", ret);
- return ret;
- }
- }
-
- hws_bwc_unlock_all_queues(ctx);
- mutex_lock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule create: failed getting AT (%d)",
+ ret);
+ return -EINVAL;
}
/* check if number of rules require rehash */
- num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+ num_of_rules = atomic_inc_return(&bwc_matcher->num_of_rules);
if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
mutex_unlock(queue_lock);
@@ -843,6 +856,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
bwc_matcher->size_log,
ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -867,6 +881,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
* Try rehash by size and insert rule again - last chance.
*/
+ atomic_set(&bwc_matcher->rehash_required, true);
mutex_unlock(queue_lock);
hws_bwc_lock_all_queues(ctx);
@@ -875,6 +890,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (ret) {
mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -890,6 +906,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(ret)) {
mutex_unlock(queue_lock);
mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ atomic_dec(&bwc_matcher->num_of_rules);
return ret;
}
@@ -921,11 +938,18 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
- ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
- params->match_buf,
- rule_actions,
- flow_source,
- bwc_queue_idx);
+ if (bwc_matcher->complex)
+ ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
+ params,
+ flow_source,
+ rule_actions,
+ bwc_queue_idx);
+ else
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
if (unlikely(ret)) {
mlx5hws_bwc_rule_free(bwc_rule);
return NULL;
@@ -952,52 +976,11 @@ hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
mutex_lock(queue_lock);
- /* check if rehash needed due to missing action template */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ at_idx = hws_bwc_rule_get_at_idx(bwc_rule, rule_actions, idx);
if (unlikely(at_idx < 0)) {
- /* we need to extend BWC matcher action templates array */
mutex_unlock(queue_lock);
- hws_bwc_lock_all_queues(ctx);
-
- /* check again - perhaps other thread already did extend_at */
- at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
- if (likely(at_idx < 0)) {
- ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
- if (unlikely(ret)) {
- hws_bwc_unlock_all_queues(ctx);
- mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
- return -EINVAL;
- }
-
- /* action templates array was extended, we need the last idx */
- at_idx = bwc_matcher->num_of_at - 1;
-
- ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
- bwc_matcher->at[at_idx]);
- if (unlikely(ret)) {
- /* Action template attach failed, possibly due to
- * requiring more action STEs.
- * Need to attempt creating new matcher with all
- * the action templates, including the new one.
- */
- ret = hws_bwc_matcher_rehash_at(bwc_matcher);
- if (unlikely(ret)) {
- mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
- bwc_matcher->at[at_idx] = NULL;
- bwc_matcher->num_of_at--;
-
- hws_bwc_unlock_all_queues(ctx);
-
- mlx5hws_err(ctx,
- "BWC rule update: rehash AT failed (%d)\n",
- ret);
- return ret;
- }
- }
- }
-
- hws_bwc_unlock_all_queues(ctx);
- mutex_lock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule update: failed getting AT\n");
+ return -EINVAL;
}
ret = hws_bwc_rule_update_sync(bwc_rule,
@@ -1023,5 +1006,10 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
return -EINVAL;
}
- return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+ /* For a complex rule, the update should happen on the second matcher */
+ if (bwc_rule->isolated_bwc_rule)
+ return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule,
+ rule_actions);
+
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 47f7ed141553..d21fc247a510 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -10,9 +10,7 @@
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
/* Max number of AT attach operations for the same matcher.
- * When the limit is reached, next attempt to attach new AT
- * will result in creation of a new matcher and moving all
- * the rules to this matcher.
+ * When the limit is reached, a larger buffer is allocated for the ATs.
*/
#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
@@ -20,20 +18,27 @@
#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+struct mlx5hws_bwc_matcher_complex_data;
struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
- struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
- u32 priority;
+ struct mlx5hws_action_template **at;
+ struct mlx5hws_bwc_matcher_complex_data *complex;
+ struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
u8 num_of_at;
+ u8 size_of_at_array;
u8 size_log;
+ u32 priority;
atomic_t num_of_rules;
+ atomic_t rehash_required;
struct list_head *rules;
};
struct mlx5hws_bwc_rule {
struct mlx5hws_bwc_matcher *bwc_matcher;
struct mlx5hws_rule *rule;
+ struct mlx5hws_bwc_rule *isolated_bwc_rule;
+ struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
u16 bwc_queue_idx;
struct list_head list_node;
};
@@ -65,6 +70,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u32 flow_source,
struct mlx5hws_rule_attr *rule_attr);
+int mlx5hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain);
+
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index 9fb059a6511f..70768953a4f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -3,6 +3,22 @@
#include "internal.h"
+#define HWS_CLEAR_MATCH_PARAM(mask, field) \
+ MLX5_SET(fte_match_param, (mask)->match_buf, field, 0)
+
+#define HWS_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
+
+static const struct rhashtable_params hws_refcount_hash = {
+ .key_len = sizeof_field(struct mlx5hws_bwc_complex_rule_hash_node,
+ match_buf),
+ .key_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
+ match_buf),
+ .head_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
+ hash_node),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
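These rhashtable parameters make the raw match_buf bytes the key, so the tag and refcount never influence lookup. A userspace model of what the key_offset/key_len pair expresses:

#include <stdio.h>
#include <string.h>

struct hash_node_model {
	unsigned int tag;	    /* not part of the key */
	int refcount;		    /* not part of the key */
	unsigned int match_buf[4];  /* the key, like .key_offset/.key_len */
};

/* Two nodes represent the same rule iff their match_buf bytes are
 * equal - exactly the region the rhashtable params select above.
 */
static int same_key(const struct hash_node_model *a,
		    const struct hash_node_model *b)
{
	return !memcmp(a->match_buf, b->match_buf, sizeof(a->match_buf));
}

int main(void)
{
	struct hash_node_model a = { 1, 1, { 0xaa, 0xbb, 0, 0 } };
	struct hash_node_model b = { 2, 5, { 0xaa, 0xbb, 0, 0 } };

	/* Different tags and refcounts, same key: the same rule. */
	printf("same_key = %d\n", same_key(&a, &b)); /* 1 */
	return 0;
}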
+
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
@@ -48,20 +64,1078 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
return is_complex;
}
+static void
+hws_bwc_matcher_complex_params_clear_fld(struct mlx5hws_context *ctx,
+ enum mlx5hws_definer_fname fname,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ switch (fname) {
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+ case MLX5HWS_DEFINER_FNAME_IP_VERSION_O:
+ case MLX5HWS_DEFINER_FNAME_IP_VERSION_I:
+ /* IP address matching has strict requirements that also mandate
+ * matching on ethtype/ip_version, so don't clear these fields -
+ * keep them in both parts of the complex matcher
+ */
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_47_16);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_15_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_CFI_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_CFI_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_ID_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_ID_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_second_cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_second_svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_second_cvlan_tag);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_second_svlan_tag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_prio);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_cfi);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_vid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_IHL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ipv4_ihl);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_IHL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ipv4_ihl);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_DSCP_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_dscp);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_DSCP_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_dscp);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_ECN_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_ecn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_ECN_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_ecn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_TTL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ttl_hoplimit);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_TTL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ttl_hoplimit);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_DST_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_SRC_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_DST_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV4_SRC_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_FRAG_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.frag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_FRAG_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.frag);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.outer_ipv6_flow_label);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.inner_ipv6_flow_label);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I:
+ case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_SPORT_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_sport);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_sport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_SPORT_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_sport);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_sport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_DPORT_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_dport);
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_dport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_L4_DPORT_I:
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport);
+ HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O:
+ HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM:
+ case MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_tcp_seq_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_tcp_ack_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.inner_tcp_seq_num);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.inner_tcp_ack_num);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTP_TEID:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_teid);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.gtpu_first_ext_dw_0);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GTPU_DW2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_2.outer_first_mpls_over_gre);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_2.outer_first_mpls_over_udp);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.geneve_tlv_option_0_data);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_0);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_0);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_1);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_2);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_2);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_id_3);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_4.prog_sample_field_value_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.vxlan_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_flags);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0:
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_next_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.outer_vxlan_gpe_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_opt_len);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_OAM:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_oam);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_PROTO:
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.geneve_protocol_type);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GENEVE_VNI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_vni);
+ break;
+ case MLX5HWS_DEFINER_FNAME_SOURCE_QP:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_sqn);
+ break;
+ case MLX5HWS_DEFINER_FNAME_SOURCE_GVMI:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_port);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_0:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_1);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_3:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_4:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_4);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_5:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_5);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_7:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_7);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_A:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_a);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_C:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_c_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_K:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_k_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_S:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_s_present);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_protocol);
+ break;
+ case MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_key.key);
+ break;
+ case MLX5HWS_DEFINER_FNAME_ICMP_DW1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_header_data);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_code);
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters_3.icmpv6_header_data);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_type);
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_code);
+ break;
+ case MLX5HWS_DEFINER_FNAME_MPLS0_O:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.outer_first_mpls);
+ break;
+ case MLX5HWS_DEFINER_FNAME_MPLS0_I:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.inner_first_mpls);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_0:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_0);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_1:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_1);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_2:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_2);
+ break;
+ case MLX5HWS_DEFINER_FNAME_TNL_HDR_3:
+ HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_3);
+ break;
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK:
+ case MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK:
+ /* assuming this is flex parser for geneve option */
+ if ((fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 0) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 1) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 2) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 3) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 4) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 5) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 6) ||
+ (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK &&
+ ctx->caps->flex_parser_id_geneve_tlv_option_0 != 7)) {
+ mlx5hws_err(ctx,
+ "Complex params: unsupported field %s (%d), flex parser ID for geneve is %d\n",
+ mlx5hws_definer_fname_to_str(fname), fname,
+ caps->flex_parser_id_geneve_tlv_option_0);
+ break;
+ }
+ HWS_CLEAR_MATCH_PARAM(mask,
+ misc_parameters.geneve_tlv_option_0_exist);
+ break;
+ case MLX5HWS_DEFINER_FNAME_REG_6:
+ default:
+ mlx5hws_err(ctx, "Complex params: unsupported field %s (%d)\n",
+ mlx5hws_definer_fname_to_str(fname), fname);
+ break;
+ }
+}
+
+static bool
+hws_bwc_matcher_complex_params_comb_is_valid(struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 combination_num)
+{
+ bool m1[MLX5HWS_DEFINER_FNAME_MAX] = {0};
+ bool m2[MLX5HWS_DEFINER_FNAME_MAX] = {0};
+ bool is_first_matcher;
+ int i;
+
+ for (i = 0; i < fc_sz; i++) {
+ is_first_matcher = !(combination_num & BIT(i));
+ if (is_first_matcher)
+ m1[fc[i].fname] = true;
+ else
+ m2[fc[i].fname] = true;
+ }
+
+ /* Not all the fields can be split into separate matchers.
+ * Some should be together on the same matcher.
+ * For example, IPv6 parts - the whole IPv6 address should be on
+ * the same matcher for us to deduce whether it's an IPv6 or an
+ * IPv4 address.
+ */
+ if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
+ (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
+ return false;
+
+ if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
+ (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
+ return false;
+
+ if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
+ (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
+ return false;
+
+ if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
+ (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
+ return false;
+
+ /* Don't split outer IPv6 dest address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]))
+ return false;
+
+ /* Don't split outer IPv6 source address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]))
+ return false;
+
+ /* Don't split inner IPv6 dest address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]))
+ return false;
+
+ /* Don't split inner IPv6 source address. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
+ m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
+ m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]))
+ return false;
+
+ /* Don't split GRE parameters. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_GRE_C] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_K] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_S] ||
+ m1[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_GRE_C] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_K] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_S] ||
+ m2[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]))
+ return false;
+
+ /* Don't split TCP ack/seq numbers. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
+ m1[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
+ m2[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]))
+ return false;
+
+ /* Don't split flex parser. */
+ if ((m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
+ m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]) &&
+ (m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
+ m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]))
+ return false;
+
+ return true;
+}
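Each value of combination_num encodes one split: bit i sends field i to the second matcher when set, to the first otherwise, so scanning 1..2^n-1 visits every possible split of n fields. A small demonstration with made-up field names:

#include <stdio.h>

int main(void)
{
	const char *field[] = { "smac", "dmac", "vid" };
	int n = 3;

	for (unsigned int comb = 1; comb < (1u << n); comb++) {
		printf("comb %u:", comb);
		for (int i = 0; i < n; i++)
			printf(" %s->m%d", field[i],
			       (comb & (1u << i)) ? 2 : 1);
		printf("\n");
	}
	return 0;
}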
+
+static void
+hws_bwc_matcher_complex_params_comb_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_parameters *m,
+ struct mlx5hws_match_parameters *m1,
+ struct mlx5hws_match_parameters *m2,
+ struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 combination_num)
+{
+ bool is_first_matcher;
+ int i;
+
+ memcpy(m1->match_buf, m->match_buf, m->match_sz);
+ memcpy(m2->match_buf, m->match_buf, m->match_sz);
+
+ for (i = 0; i < fc_sz; i++) {
+ is_first_matcher = !(combination_num & BIT(i));
+ hws_bwc_matcher_complex_params_clear_fld(ctx,
+ fc[i].fname,
+ is_first_matcher ?
+ m2 : m1);
+ }
+
+ MLX5_SET(fte_match_param, m2->match_buf,
+ misc_parameters_2.metadata_reg_c_6, -1);
+}
+
+static void
+hws_bwc_matcher_complex_params_destroy(struct mlx5hws_match_parameters *mask_1,
+ struct mlx5hws_match_parameters *mask_2)
+{
+ kfree(mask_1->match_buf);
+ kfree(mask_2->match_buf);
+}
+
+static int
+hws_bwc_matcher_complex_params_create(struct mlx5hws_context *ctx,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask,
+ struct mlx5hws_match_parameters *mask_1,
+ struct mlx5hws_match_parameters *mask_2)
+{
+ struct mlx5hws_definer_fc *fc;
+ u32 num_of_combinations;
+ int fc_sz = 0;
+ int res = 0;
+ u32 i;
+
+ if (MLX5_GET(fte_match_param, mask->match_buf,
+ misc_parameters_2.metadata_reg_c_6)) {
+ mlx5hws_err(ctx, "Complex matcher: REG_C_6 matching is reserved\n");
+ res = -EINVAL;
+ goto out;
+ }
+
+ mask_1->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ mask_2->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ if (!mask_1->match_buf || !mask_2->match_buf) {
+ mlx5hws_err(ctx, "Complex matcher: failed to allocate match_param\n");
+ res = -ENOMEM;
+ goto free_params;
+ }
+
+ mask_1->match_sz = mask->match_sz;
+ mask_2->match_sz = mask->match_sz;
+
+ fc = mlx5hws_definer_conv_match_params_to_compressed_fc(ctx,
+ match_criteria,
+ mask->match_buf,
+ &fc_sz);
+ if (!fc) {
+ res = -ENOMEM;
+ goto free_params;
+ }
+
+ if (fc_sz >= sizeof(num_of_combinations) * BITS_PER_BYTE) {
+ mlx5hws_err(ctx,
+ "Complex matcher: too many match parameters (%d)\n",
+ fc_sz);
+ res = -EINVAL;
+ goto free_fc;
+ }
+
+ /* We have a list of all the match fields from the match parameter.
+ * Now try all the possibilities of splitting them into two match
+ * buffers and look for a supported combination.
+ */
+ num_of_combinations = 1u << fc_sz;
+
+ /* Start from combination at index 1 - we know that 0 is unsupported */
+ for (i = 1; i < num_of_combinations; i++) {
+ if (!hws_bwc_matcher_complex_params_comb_is_valid(fc, fc_sz, i))
+ continue;
+
+ hws_bwc_matcher_complex_params_comb_create(ctx,
+ mask, mask_1, mask_2,
+ fc, fc_sz, i);
+ /* We now have two separate sets of match params.
+ * Check if each of them can be used in its own matcher.
+ */
+ if (!mlx5hws_bwc_match_params_is_complex(ctx,
+ match_criteria,
+ mask_1) &&
+ !mlx5hws_bwc_match_params_is_complex(ctx,
+ match_criteria,
+ mask_2))
+ break;
+ }
+
+ if (i == num_of_combinations) {
+ /* We've scanned all the combinations, but to no avail */
+ mlx5hws_err(ctx, "Complex matcher: couldn't find match params combination\n");
+ res = -EINVAL;
+ goto free_fc;
+ }
+
+ kfree(fc);
+ return 0;
+
+free_fc:
+ kfree(fc);
+free_params:
+ hws_bwc_matcher_complex_params_destroy(mask_1, mask_2);
+out:
+ return res;
+}
+
+static int
+hws_bwc_isolated_table_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ struct mlx5hws_table_attr tbl_attr = {0};
+ struct mlx5hws_table *isolated_tbl;
+ int ret = 0;
+
+ tbl_attr.type = table->type;
+ tbl_attr.level = table->level;
+
+ bwc_matcher->complex->isolated_tbl =
+ mlx5hws_table_create(ctx, &tbl_attr);
+ isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ if (!isolated_tbl)
+ return -EINVAL;
+
+ /* Set the default miss of the isolated table to
+ * point to the end anchor of the original matcher.
+ */
+ mlx5hws_cmd_set_attr_connect_miss_tbl(ctx,
+ isolated_tbl->fw_ft_type,
+ isolated_tbl->type,
+ &ft_attr);
+ ft_attr.table_miss_id = bwc_matcher->matcher->end_ft_id;
+
+ ret = mlx5hws_cmd_flow_table_modify(ctx->mdev,
+ &ft_attr,
+ isolated_tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed setting isolated tbl default miss\n");
+ goto destroy_tbl;
+ }
+
+ return 0;
+
+destroy_tbl:
+ mlx5hws_table_destroy(isolated_tbl);
+ return ret;
+}
+
+static void hws_bwc_isolated_table_destroy(struct mlx5hws_table *isolated_tbl)
+{
+ /* This table is isolated - no table is pointing to it, no need to
+ * disconnect it from anywhere, it won't affect any other table's miss.
+ */
+ mlx5hws_table_destroy(isolated_tbl);
+}
+
+static int
+hws_bwc_isolated_matcher_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_context *ctx = table->ctx;
+ int ret;
+
+ isolated_bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+ if (!isolated_bwc_matcher)
+ return -ENOMEM;
+
+ bwc_matcher->complex->isolated_bwc_matcher = isolated_bwc_matcher;
+
+ /* Isolated BWC matcher needs access to the first BWC matcher */
+ isolated_bwc_matcher->complex_first_bwc_matcher = bwc_matcher;
+
+ /* Isolated matcher needs to match on REG_C_6,
+ * so make sure its criteria bit is on.
+ */
+ match_criteria_enable |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+
+ ret = mlx5hws_bwc_matcher_create_simple(isolated_bwc_matcher,
+ isolated_tbl,
+ 0,
+ match_criteria_enable,
+ mask,
+ NULL);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated BWC matcher\n");
+ goto free_matcher;
+ }
+
+ return 0;
+
+free_matcher:
+ kfree(bwc_matcher->complex->isolated_bwc_matcher);
+ return ret;
+}
+
+static void
+hws_bwc_isolated_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ kfree(bwc_matcher);
+}
+
+static int
+hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table)
+{
+ struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ struct mlx5hws_action_mh_pattern ptrn;
+ int ret = 0;
+
+ /* Create action to jump to isolated table */
+
+ bwc_matcher->complex->action_go_to_tbl =
+ mlx5hws_action_create_dest_table(ctx,
+ isolated_tbl,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_go_to_tbl) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create go-to-tbl action\n");
+ return -EINVAL;
+ }
+
+ /* Create modify header action to set REG_C_6 */
+
+ MLX5_SET(set_action_in, modify_hdr_action,
+ action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ field, MLX5_MODI_META_REG_C_6);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ length, 0); /* zero means length of 32 */
+ MLX5_SET(set_action_in, modify_hdr_action, offset, 0);
+ MLX5_SET(set_action_in, modify_hdr_action, data, 0);
+
+ ptrn.data = (void *)modify_hdr_action;
+ ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE;
+
+ bwc_matcher->complex->action_metadata =
+ mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_metadata) {
+ ret = -EINVAL;
+ goto destroy_action_go_to_tbl;
+ }
+
+ /* Create last action */
+
+ bwc_matcher->complex->action_last =
+ mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!bwc_matcher->complex->action_last) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create last action\n");
+ ret = -EINVAL;
+ goto destroy_action_metadata;
+ }
+
+ return 0;
+
+destroy_action_metadata:
+ mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
+destroy_action_go_to_tbl:
+ mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+ return ret;
+}
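The modify-header action above encodes a full 32-bit write of REG_C_6, where a length of 0 means 32 bits. An illustrative model of that length-0 convention (not the device's actual action format):

#include <stdint.h>
#include <stdio.h>

static void apply_set(uint32_t *reg, unsigned int offset,
		      unsigned int length, uint32_t data)
{
	uint64_t mask;

	if (length == 0)
		length = 32;	/* zero means length of 32 */
	mask = ((1ull << length) - 1) << offset;
	*reg = (uint32_t)((*reg & ~mask) |
			  (((uint64_t)data << offset) & mask));
}

int main(void)
{
	uint32_t reg_c_6 = 0xffffffff;

	apply_set(&reg_c_6, 0, 0, 0x1234);   /* full-register write */
	printf("REG_C_6 = 0x%x\n", reg_c_6); /* 0x1234 */
	return 0;
}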
+
+static void
+hws_bwc_isolated_actions_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_action_destroy(bwc_matcher->complex->action_last);
+ mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
+ mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+}
+
int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_table *table,
u32 priority,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
{
- mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
- return -EOPNOTSUPP;
+ enum mlx5hws_action_type complex_init_action_types[3];
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_match_parameters mask_1 = {0};
+ struct mlx5hws_match_parameters mask_2 = {0};
+ struct mlx5hws_context *ctx = table->ctx;
+ int ret;
+
+ ret = hws_bwc_matcher_complex_params_create(table->ctx,
+ match_criteria_enable,
+ mask, &mask_1, &mask_2);
+ if (ret)
+ goto err;
+
+ bwc_matcher->complex =
+ kzalloc(sizeof(*bwc_matcher->complex), GFP_KERNEL);
+ if (!bwc_matcher->complex) {
+ ret = -ENOMEM;
+ goto free_masks;
+ }
+
+ ret = rhashtable_init(&bwc_matcher->complex->refcount_hash,
+ &hws_refcount_hash);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed to initialize rhashtable\n");
+ goto free_complex;
+ }
+
+ mutex_init(&bwc_matcher->complex->hash_lock);
+ ida_init(&bwc_matcher->complex->metadata_ida);
+
+ /* Create initial action template for the first matcher.
+ * Usually the initial AT is just a dummy, but in the case of a
+ * complex matcher we know exactly which actions it should have.
+ */
+
+ complex_init_action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ complex_init_action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ complex_init_action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ /* Create the first matcher */
+
+ ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ &mask_1,
+ complex_init_action_types);
+ if (ret)
+ goto destroy_ida;
+
+ /* Create isolated table to hold the second isolated matcher */
+
+ ret = hws_bwc_isolated_table_create(bwc_matcher, table);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated table\n");
+ goto destroy_first_matcher;
+ }
+
+ /* Now create the second BWC matcher - the isolated one */
+
+ ret = hws_bwc_isolated_matcher_create(bwc_matcher, table,
+ match_criteria_enable, &mask_2);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated matcher\n");
+ goto destroy_isolated_tbl;
+ }
+
+ /* Create action for isolated matcher's rules */
+
+ ret = hws_bwc_isolated_actions_create(bwc_matcher, table);
+ if (ret) {
+ mlx5hws_err(ctx, "Complex matcher: failed creating isolated actions\n");
+ goto destroy_isolated_matcher;
+ }
+
+ hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
+ return 0;
+
+destroy_isolated_matcher:
+ isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
+ hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
+destroy_isolated_tbl:
+ hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
+destroy_first_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+destroy_ida:
+ ida_destroy(&bwc_matcher->complex->metadata_ida);
+ mutex_destroy(&bwc_matcher->complex->hash_lock);
+ rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
+free_complex:
+ kfree(bwc_matcher->complex);
+ bwc_matcher->complex = NULL;
+free_masks:
+ hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
+err:
+ return ret;
}
void
mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- /* nothing to do here */
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher =
+ bwc_matcher->complex->isolated_bwc_matcher;
+
+ hws_bwc_isolated_actions_destroy(bwc_matcher);
+ hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
+ hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+ ida_destroy(&bwc_matcher->complex->metadata_ida);
+ mutex_destroy(&bwc_matcher->complex->hash_lock);
+ rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
+ kfree(bwc_matcher->complex);
+ bwc_matcher->complex = NULL;
+}
+
+static void
+hws_bwc_matcher_complex_hash_lock(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mutex_lock(&bwc_matcher->complex->hash_lock);
+}
+
+static void
+hws_bwc_matcher_complex_hash_unlock(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mutex_unlock(&bwc_matcher->complex->hash_lock);
+}
+
+static int
+hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node;
+ struct rhashtable *refcount_hash;
+ int tag, i;
+
+ bwc_rule->complex_hash_node = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (unlikely(!node))
+ return -ENOMEM;
+
+ tag = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ if (unlikely(tag < 0)) {
+ kfree(node);
+ return tag;
+ }
+ node->tag = tag;
+ refcount_set(&node->refcount, 1);
+
+ /* Clear match buffer - turn off all the unrelated fields
+ * in accordance with the match params mask for the first
+ * matcher out of the two parts of the complex matcher.
+ * The resulting mask is the key for the hash.
+ */
+ for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
+ node->match_buf[i] = params->match_buf[i] &
+ bwc_matcher->mt->match_param[i];
+
+ refcount_hash = &bwc_matcher->complex->refcount_hash;
+ old_node = rhashtable_lookup_get_insert_fast(refcount_hash,
+ &node->hash_node,
+ hws_refcount_hash);
+ if (old_node) {
+ /* Rule with the same tag already exists - update refcount */
+ refcount_inc(&old_node->refcount);
+ /* Let the new rule use the same tag as the existing rule.
+ * Note that we don't have any indication for the rule creation
+ * process that a rule with similar matching params already
+ * exists - no harm done when this rule is overwritten by
+ * the same STE.
+ * There's some performance advantage in skipping such cases,
+ * so this is left for future optimizations.
+ */
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+ kfree(node);
+ node = old_node;
+ }
+
+ bwc_rule->complex_hash_node = node;
+ return 0;
+}
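The get side pairs an IDA-style tag allocator with a refcounted dedup table. A simplified userspace model using a linked list in place of the rhashtable (all names are invented):

#include <stdlib.h>

struct node {
	unsigned int key;
	unsigned int tag;
	int refcount;
	struct node *next;
};

static struct node *table;
static unsigned int next_tag;

static struct node *node_get(unsigned int key)
{
	struct node *n;

	for (n = table; n; n = n->next)
		if (n->key == key) {
			n->refcount++;	/* duplicate: share the tag */
			return n;
		}

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->key = key;
	n->tag = next_tag++;		/* ida_alloc() stand-in */
	n->refcount = 1;
	n->next = table;
	table = n;
	return n;
}

int main(void)
{
	struct node *a = node_get(42);
	struct node *b = node_get(42); /* duplicate: refcount 2, same tag */

	return (a && a == b && a->refcount == 2) ? 0 : 1;
}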
+
+static void
+hws_bwc_rule_complex_hash_node_put(struct mlx5hws_bwc_rule *bwc_rule,
+ bool *is_last_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_complex_rule_hash_node *node;
+
+ if (is_last_rule)
+ *is_last_rule = false;
+
+ node = bwc_rule->complex_hash_node;
+ if (refcount_dec_and_test(&node->refcount)) {
+ rhashtable_remove_fast(&bwc_matcher->complex->refcount_hash,
+ &node->hash_node,
+ hws_refcount_hash);
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+ kfree(node);
+ if (is_last_rule)
+ *is_last_rule = true;
+ }
+
+ bwc_rule->complex_hash_node = NULL;
}
int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
@@ -70,19 +1144,271 @@ int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_rule_action rule_actions[],
u16 bwc_queue_idx)
{
- mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
- "Complex rule is not supported yet\n");
- return -EOPNOTSUPP;
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
+ struct mlx5hws_rule_action rule_actions_1[3] = {0};
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ u32 *match_buf_2;
+ u32 metadata_val;
+ int ret = 0;
+
+ isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
+ bwc_rule->isolated_bwc_rule =
+ mlx5hws_bwc_rule_alloc(isolated_bwc_matcher);
+ if (unlikely(!bwc_rule->isolated_bwc_rule))
+ return -ENOMEM;
+
+ hws_bwc_matcher_complex_hash_lock(bwc_matcher);
+
+ /* Get a new hash node for this complex rule.
+ * If this is a unique set of match params for the first matcher,
+ * we will get a new hash node with a newly allocated IDA tag.
+ * Otherwise we will get the existing node with its tag and an
+ * updated refcount.
+ */
+ ret = hws_bwc_rule_complex_hash_node_get(bwc_rule, params);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx, "Complex rule: failed getting RHT node for this rule\n");
+ goto free_isolated_rule;
+ }
+
+ /* No need to clear the match buffer's fields in accordance with
+ * what will actually be matched on the first and second matchers.
+ * Both matchers were created with the appropriate masks
+ * and each of them holds the appropriate field copy array,
+ * so rule creation will use only the fields that will be copied
+ * in accordance with setters in field copy array.
+ * We do, however, need to temporarily allocate a match buffer
+ * for the second (isolated) rule in order not to modify the
+ * user's match params buffer.
+ */
+
+ match_buf_2 = kmemdup(params->match_buf,
+ MLX5_ST_SZ_BYTES(fte_match_param),
+ GFP_KERNEL);
+ if (unlikely(!match_buf_2)) {
+ mlx5hws_err(ctx, "Complex rule: failed allocating match_buf\n");
+ ret = -ENOMEM;
+ goto hash_node_put;
+ }
+
+ /* On 2nd matcher, use unique 32-bit ID as a matching tag */
+ metadata_val = bwc_rule->complex_hash_node->tag;
+ MLX5_SET(fte_match_param, match_buf_2,
+ misc_parameters_2.metadata_reg_c_6, metadata_val);
+
+ /* Isolated rule's rule_actions contain all the original actions */
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule->isolated_bwc_rule,
+ match_buf_2,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
+ kfree(match_buf_2);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Complex rule: failed creating isolated BWC rule (%d)\n",
+ ret);
+ goto hash_node_put;
+ }
+
+ /* First rule's rule_actions contain setting metadata and
+ * jump to isolated table that contains the second matcher.
+ * Set metadata value to a unique value for this rule.
+ */
+
+ MLX5_SET(set_action_in, modify_hdr_action,
+ action_type, MLX5_MODIFICATION_TYPE_SET);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ field, MLX5_MODI_META_REG_C_6);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ length, 0); /* zero means length of 32 */
+ MLX5_SET(set_action_in, modify_hdr_action,
+ offset, 0);
+ MLX5_SET(set_action_in, modify_hdr_action,
+ data, metadata_val);
+
+ rule_actions_1[0].action = bwc_matcher->complex->action_metadata;
+ rule_actions_1[0].modify_header.offset = 0;
+ rule_actions_1[0].modify_header.data = modify_hdr_action;
+
+ rule_actions_1[1].action = bwc_matcher->complex->action_go_to_tbl;
+ rule_actions_1[2].action = bwc_matcher->complex->action_last;
+
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions_1,
+ flow_source,
+ bwc_queue_idx);
+
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Complex rule: failed creating first BWC rule (%d)\n",
+ ret);
+ goto destroy_isolated_rule;
+ }
+
+ hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
+
+ return 0;
+
+destroy_isolated_rule:
+ mlx5hws_bwc_rule_destroy_simple(bwc_rule->isolated_bwc_rule);
+hash_node_put:
+ hws_bwc_rule_complex_hash_node_put(bwc_rule, NULL);
+free_isolated_rule:
+ hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
+ mlx5hws_bwc_rule_free(bwc_rule->isolated_bwc_rule);
+ return ret;
}
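Ordering is the crux of this function: the isolated rule must exist before the first-matcher rule starts steering tagged packets to it, and failure unwinds in reverse. A stub-based sketch of that control flow (stub names are invented):

#include <stdio.h>

/* Stubs standing in for the two mlx5hws_bwc_rule_create_simple()
 * calls above; the point is the ordering and the reverse unwind.
 */
static int create_isolated_rule(void) { return 0; }
static int create_first_rule(void) { return -1; /* simulate failure */ }
static void destroy_isolated_rule(void) { puts("unwound isolated rule"); }

static int create_complex(void)
{
	int ret;

	/* The isolated (second-matcher) rule must exist before the
	 * first-matcher rule starts steering tagged packets to it.
	 */
	ret = create_isolated_rule();
	if (ret)
		return ret;

	ret = create_first_rule();
	if (ret)
		destroy_isolated_rule(); /* unwind in reverse order */

	return ret;
}

int main(void)
{
	return create_complex() ? 1 : 0;
}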
int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
{
- return 0;
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_rule *isolated_bwc_rule;
+ int ret_isolated, ret;
+ bool is_last_rule;
+
+ hws_bwc_matcher_complex_hash_lock(bwc_rule->bwc_matcher);
+
+ hws_bwc_rule_complex_hash_node_put(bwc_rule, &is_last_rule);
+ bwc_rule->rule->skip_delete = !is_last_rule;
+
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "BWC complex rule: failed destroying first rule\n");
+
+ isolated_bwc_rule = bwc_rule->isolated_bwc_rule;
+ ret_isolated = mlx5hws_bwc_rule_destroy_simple(isolated_bwc_rule);
+ if (unlikely(ret_isolated))
+ mlx5hws_err(ctx, "BWC complex rule: failed destroying second (isolated) rule\n");
+
+ hws_bwc_matcher_complex_hash_unlock(bwc_rule->bwc_matcher);
+
+ mlx5hws_bwc_rule_free(isolated_bwc_rule);
+
+ return ret || ret_isolated;
+}
+
+static void
+hws_bwc_matcher_clear_hash_rtcs(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_complex_rule_hash_node *node;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&bwc_matcher->complex->refcount_hash, &iter);
+ rhashtable_walk_start(&iter);
+
+ while ((node = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(node))
+ continue;
+ node->rtc_valid = false;
+ }
+
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
}
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+int
+mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
- "Moving complex rule is not supported yet\n");
- return -EOPNOTSUPP;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ bool move_error = false, poll_error = false;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *tmp_bwc_rule;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mlx5hws_table *isolated_tbl;
+ struct mlx5hws_rule *tmp_rule;
+ struct list_head *rules_list;
+ u32 expected_completions = 1;
+ u32 end_ft_id;
+ int i, ret;
+
+ /* We are rehashing the matcher that is the first part of the complex
+ * matcher. Need to update the isolated matcher to point to the end_ft
+ * of this new matcher. This needs to be done before moving any rules
+ * to prevent possible steering loops.
+ */
+ isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
+ ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl, end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed updating end_ft of isolated matcher (%d)\n",
+ ret);
+ return ret;
+ }
+
+ hws_bwc_matcher_clear_hash_rtcs(bwc_matcher);
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
+ continue;
+
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ list_for_each_entry(tmp_bwc_rule, rules_list, list_node) {
+			/* Check if a rule with the same tag has already
+			 * been moved.
+			 */
+ if (tmp_bwc_rule->complex_hash_node->rtc_valid) {
+				/* This rule is a duplicate of a rule with the
+				 * same tag that has already been moved.
+				 * Just update this rule's RTCs.
+				 */
+ tmp_bwc_rule->rule->rtc_0 =
+ tmp_bwc_rule->complex_hash_node->rtc_0;
+ tmp_bwc_rule->rule->rtc_1 =
+ tmp_bwc_rule->complex_hash_node->rtc_1;
+ tmp_bwc_rule->rule->matcher =
+ tmp_bwc_rule->rule->matcher->resize_dst;
+ continue;
+ }
+
+			/* This is the first time we're moving a rule with
+			 * this tag. Move it for real.
+			 */
+ tmp_rule = tmp_bwc_rule->rule;
+ tmp_rule->skip_delete = false;
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ tmp_rule,
+ &rule_attr);
+ if (unlikely(ret && !move_error)) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = true;
+ }
+
+ expected_completions = 1;
+ ret = mlx5hws_bwc_queue_poll(ctx,
+ rule_attr.queue_id,
+ &expected_completions,
+ true);
+ if (unlikely(ret && !poll_error)) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = true;
+ }
+
+			/* Done moving the rule to the new matcher.
+			 * Record its new RTCs in the hash node so that
+			 * all the duplicate rules can be updated.
+			 */
+ tmp_bwc_rule->complex_hash_node->rtc_0 =
+ tmp_bwc_rule->rule->rtc_0;
+ tmp_bwc_rule->complex_hash_node->rtc_1 =
+ tmp_bwc_rule->rule->rtc_1;
+
+ tmp_bwc_rule->complex_hash_node->rtc_valid = true;
+ }
+ }
+
+ if (move_error || poll_error)
+ ret = -EINVAL;
+
+ return ret;
}
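
The move loop above deduplicates by tag: the first rule seen for a tag is moved in hardware, and every later duplicate just copies the new RTC IDs from the shared hash node. A standalone sketch of the same pattern with hypothetical structures:

#include <stdbool.h>
#include <stdio.h>

struct tag_state {
	bool moved;   /* rtc_valid analogue */
	int new_rtc;  /* RTC id in the resized matcher */
};

struct rule {
	struct tag_state *tag;
	int rtc;
};

static int move_hw_rule(int old_rtc) { return old_rtc + 100; } /* stub */

int main(void)
{
	struct tag_state t = { 0 };
	struct rule rules[3] = { { &t, 7 }, { &t, 7 }, { &t, 7 } };

	for (int i = 0; i < 3; i++) {
		if (rules[i].tag->moved) {
			rules[i].rtc = rules[i].tag->new_rtc; /* duplicate: repoint */
			continue;
		}
		rules[i].rtc = move_hw_rule(rules[i].rtc); /* first: real move */
		rules[i].tag->new_rtc = rules[i].rtc;
		rules[i].tag->moved = true;
	}

	for (int i = 0; i < 3; i++)
		printf("rule %d -> rtc %d\n", i, rules[i].rtc);
	return 0;
}
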
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
index 340f0688e394..a6887c7e39d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
@@ -4,6 +4,27 @@
#ifndef HWS_BWC_COMPLEX_H_
#define HWS_BWC_COMPLEX_H_
+struct mlx5hws_bwc_complex_rule_hash_node {
+ u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM];
+ u32 tag;
+ refcount_t refcount;
+ bool rtc_valid;
+ u32 rtc_0;
+ u32 rtc_1;
+ struct rhash_head hash_node;
+};
+
+struct mlx5hws_bwc_matcher_complex_data {
+ struct mlx5hws_table *isolated_tbl;
+ struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_action *action_metadata;
+ struct mlx5hws_action *action_go_to_tbl;
+ struct mlx5hws_action *action_last;
+ struct rhashtable refcount_hash;
+ struct mutex hash_lock; /* Protect the refcount rhashtable */
+ struct ida metadata_ida;
+};
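
The metadata_ida above hands out the unique 32-bit tags that link the two halves of each complex rule. A minimal bitmap-based allocator sketch conveying the same allocate/recycle behavior (not the kernel IDA API):

#include <stdint.h>
#include <stdio.h>

#define NUM_IDS 64

static uint64_t id_bitmap; /* bit set = ID in use */

static int id_alloc(void)
{
	for (int i = 0; i < NUM_IDS; i++) {
		if (!(id_bitmap & (1ULL << i))) {
			id_bitmap |= 1ULL << i;
			return i;
		}
	}
	return -1; /* exhausted */
}

static void id_free(int id)
{
	id_bitmap &= ~(1ULL << id);
}

int main(void)
{
	int a = id_alloc(), b = id_alloc();

	printf("a=%d b=%d\n", a, b);        /* 0, 1 */
	id_free(a);
	printf("reused=%d\n", id_alloc());  /* 0 again */
	(void)b;
	return 0;
}
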
+
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index e8f98c109b99..9c83753e4592 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -406,7 +406,6 @@ int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
- MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 51d9e0291ac1..fa6bff210266 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -70,7 +70,6 @@ struct mlx5hws_cmd_rtc_create_attr {
u32 pd;
u32 stc_base;
u32 ste_base;
- u32 ste_offset;
u32 miss_ft_id;
bool fw_gen_wqe;
u8 update_index_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index 9cda2774fd64..428dae869706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -34,7 +34,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
/* Create an STC pool per FT type */
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
@@ -159,10 +158,16 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto pools_uninit;
+ ret = mlx5hws_action_ste_pool_init(ctx);
+ if (ret)
+ goto close_queues;
+
INIT_LIST_HEAD(&ctx->tbl_list);
return 0;
+close_queues:
+ mlx5hws_send_queues_close(ctx);
pools_uninit:
hws_context_pools_uninit(ctx);
uninit_pd:
@@ -175,6 +180,7 @@ static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
return;
+ mlx5hws_action_ste_pool_uninit(ctx);
mlx5hws_send_queues_close(ctx);
hws_context_pools_uninit(ctx);
hws_context_uninit_pd(ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 38c3647444ad..3f8938c73dc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -39,6 +39,8 @@ struct mlx5hws_context {
struct mlx5hws_cmd_query_caps *caps;
u32 pd_num;
struct mlx5hws_pool *stc_pool;
+ struct mlx5hws_action_ste_pool *action_ste_pool; /* One per queue */
+ struct delayed_work action_ste_cleanup;
struct mlx5hws_context_common_res common_res;
struct mlx5hws_pattern_cache *pattern_cache;
struct mlx5hws_definer_cache *definer_cache;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 696275fd0ce2..91568d6c1dac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -118,7 +118,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
{
enum mlx5hws_table_type tbl_type = matcher->tbl->type;
struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
- struct mlx5hws_pool_chunk *ste;
struct mlx5hws_pool *ste_pool;
u64 icm_addr_0 = 0;
u64 icm_addr_1 = 0;
@@ -134,12 +133,11 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->end_ft_id,
matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
- ste = &matcher->match_ste.ste;
ste_pool = matcher->match_ste.pool;
if (ste_pool) {
- ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ ste_0_id = mlx5hws_pool_get_base_id(ste_pool);
if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ ste_1_id = mlx5hws_pool_get_base_mirror_id(ste_pool);
}
seq_printf(f, ",%d,%d,%d,%d",
@@ -148,19 +146,6 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->match_ste.rtc_1_id,
(int)ste_1_id);
- ste = &matcher->action_ste.ste;
- ste_pool = matcher->action_ste.pool;
- if (ste_pool) {
- ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
- if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
- ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
- else
- ste_1_id = -1;
- } else {
- ste_0_id = -1;
- ste_1_id = -1;
- }
-
ft_attr.type = matcher->tbl->fw_ft_type;
ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
matcher->end_ft_id,
@@ -170,10 +155,7 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
if (ret)
return ret;
- seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
- matcher->action_ste.rtc_0_id, (int)ste_0_id,
- matcher->action_ste.rtc_1_id, (int)ste_1_id,
- 0,
+ seq_printf(f, ",-1,-1,-1,-1,0,0x%llx,0x%llx\n",
mlx5hws_debug_icm_to_idx(icm_addr_0),
mlx5hws_debug_icm_to_idx(icm_addr_1));
@@ -387,14 +369,17 @@ static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context
if (!stc_pool)
return 0;
- if (stc_pool->resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->resource[0]);
+ if (stc_pool->resource) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx,
+ stc_pool->resource);
if (ret)
return ret;
}
- if (stc_pool->mirror_resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->mirror_resource[0]);
+ if (stc_pool->mirror_resource) {
+ struct mlx5hws_pool_resource *res = stc_pool->mirror_resource;
+
+ ret = hws_debug_dump_context_stc_resource(f, ctx, res);
if (ret)
return ret;
}
@@ -402,10 +387,41 @@ static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context
return 0;
}
+static void
+hws_debug_dump_action_ste_table(struct seq_file *f,
+ struct mlx5hws_action_ste_table *action_tbl)
+{
+ int ste_0_id = mlx5hws_pool_get_base_id(action_tbl->pool);
+ int ste_1_id = mlx5hws_pool_get_base_mirror_id(action_tbl->pool);
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE,
+ HWS_PTR_TO_ID(action_tbl),
+ action_tbl->rtc_0_id, ste_0_id,
+ action_tbl->rtc_1_id, ste_1_id);
+}
+
+static void hws_debug_dump_action_ste_pool(struct seq_file *f,
+ struct mlx5hws_action_ste_pool *pool)
+{
+ struct mlx5hws_action_ste_table *action_tbl;
+ enum mlx5hws_pool_optimize opt;
+
+ mutex_lock(&pool->lock);
+ for (opt = MLX5HWS_POOL_OPTIMIZE_NONE; opt < MLX5HWS_POOL_OPTIMIZE_MAX;
+ opt++) {
+ list_for_each_entry(action_tbl, &pool->elems[opt].available,
+ list_node) {
+ hws_debug_dump_action_ste_table(f, action_tbl);
+ }
+ }
+ mutex_unlock(&pool->lock);
+}
+
static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
{
struct mlx5hws_table *tbl;
- int ret;
+ int ret, i;
ret = hws_debug_dump_context_info(f, ctx);
if (ret)
@@ -425,6 +441,9 @@ static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ct
return ret;
}
+ for (i = 0; i < ctx->queues; i++)
+ hws_debug_dump_action_ste_pool(f, &ctx->action_ste_pool[i]);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
index e44e7ae28f93..89c396f9f266 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h
@@ -26,6 +26,8 @@ enum mlx5hws_debug_res_type {
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+
+ MLX5HWS_DEBUG_RES_TYPE_ACTION_STE_TABLE = 4300,
};
static inline u64
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index c8cc0c8115f5..5cc0dc002ac1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -158,6 +158,218 @@ struct mlx5hws_definer_conv_data {
u32 match_flags;
};
+#define HWS_DEFINER_ENTRY(name)[MLX5HWS_DEFINER_FNAME_##name] = #name
+
+static const char * const hws_definer_fname_to_str[] = {
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_SMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_47_16_I),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_O),
+ HWS_DEFINER_ENTRY(ETH_DMAC_15_0_I),
+ HWS_DEFINER_ENTRY(ETH_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_TYPE_I),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_O),
+ HWS_DEFINER_ENTRY(ETH_L3_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_FIRST_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_ID_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_TYPE_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_PRIO_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_CFI_I),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_O),
+ HWS_DEFINER_ENTRY(VLAN_SECOND_ID_I),
+ HWS_DEFINER_ENTRY(IPV4_IHL_O),
+ HWS_DEFINER_ENTRY(IPV4_IHL_I),
+ HWS_DEFINER_ENTRY(IP_DSCP_O),
+ HWS_DEFINER_ENTRY(IP_DSCP_I),
+ HWS_DEFINER_ENTRY(IP_ECN_O),
+ HWS_DEFINER_ENTRY(IP_ECN_I),
+ HWS_DEFINER_ENTRY(IP_TTL_O),
+ HWS_DEFINER_ENTRY(IP_TTL_I),
+ HWS_DEFINER_ENTRY(IPV4_DST_O),
+ HWS_DEFINER_ENTRY(IPV4_DST_I),
+ HWS_DEFINER_ENTRY(IPV4_SRC_O),
+ HWS_DEFINER_ENTRY(IPV4_SRC_I),
+ HWS_DEFINER_ENTRY(IP_VERSION_O),
+ HWS_DEFINER_ENTRY(IP_VERSION_I),
+ HWS_DEFINER_ENTRY(IP_FRAG_O),
+ HWS_DEFINER_ENTRY(IP_FRAG_I),
+ HWS_DEFINER_ENTRY(IP_LEN_O),
+ HWS_DEFINER_ENTRY(IP_LEN_I),
+ HWS_DEFINER_ENTRY(IP_TOS_O),
+ HWS_DEFINER_ENTRY(IP_TOS_I),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_O),
+ HWS_DEFINER_ENTRY(IPV6_FLOW_LABEL_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_DST_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_DST_31_0_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_O),
+ HWS_DEFINER_ENTRY(IPV6_SRC_127_96_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_95_64_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_63_32_I),
+ HWS_DEFINER_ENTRY(IPV6_SRC_31_0_I),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_O),
+ HWS_DEFINER_ENTRY(IP_PROTOCOL_I),
+ HWS_DEFINER_ENTRY(L4_SPORT_O),
+ HWS_DEFINER_ENTRY(L4_SPORT_I),
+ HWS_DEFINER_ENTRY(L4_DPORT_O),
+ HWS_DEFINER_ENTRY(L4_DPORT_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_I),
+ HWS_DEFINER_ENTRY(TCP_FLAGS_O),
+ HWS_DEFINER_ENTRY(TCP_SEQ_NUM),
+ HWS_DEFINER_ENTRY(TCP_ACK_NUM),
+ HWS_DEFINER_ENTRY(GTP_TEID),
+ HWS_DEFINER_ENTRY(GTP_MSG_TYPE),
+ HWS_DEFINER_ENTRY(GTP_EXT_FLAG),
+ HWS_DEFINER_ENTRY(GTP_NEXT_EXT_HDR),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_PDU),
+ HWS_DEFINER_ENTRY(GTP_EXT_HDR_QFI),
+ HWS_DEFINER_ENTRY(GTPU_DW0),
+ HWS_DEFINER_ENTRY(GTPU_FIRST_EXT_DW0),
+ HWS_DEFINER_ENTRY(GTPU_DW2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_0),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_1),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_2),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_3),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_4),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_5),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_6),
+ HWS_DEFINER_ENTRY(FLEX_PARSER_7),
+ HWS_DEFINER_ENTRY(VPORT_REG_C_0),
+ HWS_DEFINER_ENTRY(VXLAN_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_FLAGS),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD0),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_PROTO),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_VNI),
+ HWS_DEFINER_ENTRY(VXLAN_GPE_RSVD1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_LEN),
+ HWS_DEFINER_ENTRY(GENEVE_OAM),
+ HWS_DEFINER_ENTRY(GENEVE_PROTO),
+ HWS_DEFINER_ENTRY(GENEVE_VNI),
+ HWS_DEFINER_ENTRY(SOURCE_QP),
+ HWS_DEFINER_ENTRY(SOURCE_GVMI),
+ HWS_DEFINER_ENTRY(REG_0),
+ HWS_DEFINER_ENTRY(REG_1),
+ HWS_DEFINER_ENTRY(REG_2),
+ HWS_DEFINER_ENTRY(REG_3),
+ HWS_DEFINER_ENTRY(REG_4),
+ HWS_DEFINER_ENTRY(REG_5),
+ HWS_DEFINER_ENTRY(REG_6),
+ HWS_DEFINER_ENTRY(REG_7),
+ HWS_DEFINER_ENTRY(REG_8),
+ HWS_DEFINER_ENTRY(REG_9),
+ HWS_DEFINER_ENTRY(REG_10),
+ HWS_DEFINER_ENTRY(REG_11),
+ HWS_DEFINER_ENTRY(REG_A),
+ HWS_DEFINER_ENTRY(REG_B),
+ HWS_DEFINER_ENTRY(GRE_KEY_PRESENT),
+ HWS_DEFINER_ENTRY(GRE_C),
+ HWS_DEFINER_ENTRY(GRE_K),
+ HWS_DEFINER_ENTRY(GRE_S),
+ HWS_DEFINER_ENTRY(GRE_PROTOCOL),
+ HWS_DEFINER_ENTRY(GRE_OPT_KEY),
+ HWS_DEFINER_ENTRY(GRE_OPT_SEQ),
+ HWS_DEFINER_ENTRY(GRE_OPT_CHECKSUM),
+ HWS_DEFINER_ENTRY(INTEGRITY_O),
+ HWS_DEFINER_ENTRY(INTEGRITY_I),
+ HWS_DEFINER_ENTRY(ICMP_DW1),
+ HWS_DEFINER_ENTRY(ICMP_DW2),
+ HWS_DEFINER_ENTRY(ICMP_DW3),
+ HWS_DEFINER_ENTRY(IPSEC_SPI),
+ HWS_DEFINER_ENTRY(IPSEC_SEQUENCE_NUMBER),
+ HWS_DEFINER_ENTRY(IPSEC_SYNDROME),
+ HWS_DEFINER_ENTRY(MPLS0_O),
+ HWS_DEFINER_ENTRY(MPLS1_O),
+ HWS_DEFINER_ENTRY(MPLS2_O),
+ HWS_DEFINER_ENTRY(MPLS3_O),
+ HWS_DEFINER_ENTRY(MPLS4_O),
+ HWS_DEFINER_ENTRY(MPLS0_I),
+ HWS_DEFINER_ENTRY(MPLS1_I),
+ HWS_DEFINER_ENTRY(MPLS2_I),
+ HWS_DEFINER_ENTRY(MPLS3_I),
+ HWS_DEFINER_ENTRY(MPLS4_I),
+ HWS_DEFINER_ENTRY(FLEX_PARSER0_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER1_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER2_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER3_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER4_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER5_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER6_OK),
+ HWS_DEFINER_ENTRY(FLEX_PARSER7_OK),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_O),
+ HWS_DEFINER_ENTRY(OKS2_MPLS0_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS1_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS2_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS3_I),
+ HWS_DEFINER_ENTRY(OKS2_MPLS4_I),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_OK_7),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_0),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_1),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_2),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_3),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_4),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_5),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_6),
+ HWS_DEFINER_ENTRY(GENEVE_OPT_DW_7),
+ HWS_DEFINER_ENTRY(IB_L4_OPCODE),
+ HWS_DEFINER_ENTRY(IB_L4_QPN),
+ HWS_DEFINER_ENTRY(IB_L4_A),
+ HWS_DEFINER_ENTRY(RANDOM_NUM),
+ HWS_DEFINER_ENTRY(PTYPE_L2_O),
+ HWS_DEFINER_ENTRY(PTYPE_L2_I),
+ HWS_DEFINER_ENTRY(PTYPE_L3_O),
+ HWS_DEFINER_ENTRY(PTYPE_L3_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_I),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_O),
+ HWS_DEFINER_ENTRY(PTYPE_L4_EXT_I),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_O),
+ HWS_DEFINER_ENTRY(PTYPE_FRAG_I),
+ HWS_DEFINER_ENTRY(TNL_HDR_0),
+ HWS_DEFINER_ENTRY(TNL_HDR_1),
+ HWS_DEFINER_ENTRY(TNL_HDR_2),
+ HWS_DEFINER_ENTRY(TNL_HDR_3),
+ [MLX5HWS_DEFINER_FNAME_MAX] = "DEFINER_FNAME_UNKNOWN",
+};
+
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname)
+{
+ if (fname > MLX5HWS_DEFINER_FNAME_MAX)
+ fname = MLX5HWS_DEFINER_FNAME_MAX;
+ return hws_definer_fname_to_str[fname];
+}
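
The table above relies on designated initializers plus the stringizing operator, so each enum entry is written exactly once and any out-of-range value clamps to a sentinel string. The trick in miniature, with a hypothetical three-entry enum:

#include <stdio.h>

enum fname { FNAME_ETH_TYPE_O, FNAME_IP_TTL_I, FNAME_MAX };

#define ENTRY(name) [FNAME_##name] = #name

static const char * const fname_str[] = {
	ENTRY(ETH_TYPE_O),
	ENTRY(IP_TTL_I),
	[FNAME_MAX] = "FNAME_UNKNOWN",
};

static const char *fname_to_str(enum fname f)
{
	if (f > FNAME_MAX)
		f = FNAME_MAX; /* clamp out-of-range values */
	return fname_str[f];
}

int main(void)
{
	printf("%s\n", fname_to_str(FNAME_IP_TTL_I));
	printf("%s\n", fname_to_str((enum fname)99));
	return 0;
}
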
+
static void
hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
void *match_param,
@@ -509,7 +721,7 @@ static int
hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
u32 *match_param)
{
- bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
u32 *s_ipv6, *d_ipv6;
@@ -521,6 +733,20 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
return -EINVAL;
}
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ outer_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, outer_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
/* L2 Check ethertype */
HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
outer_headers.ethertype,
@@ -570,10 +796,16 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
/* Assume IPv6 is used if ipv6 bits are set */
- is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
- is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, outer_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
- if (is_s_ipv6) {
+ if (is_ipv6) {
/* Handle IPv6 source address */
HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -587,13 +819,6 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_src_outer.ipv6_address_31_0);
- } else {
- /* Handle IPv4 source address */
- HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
- ipv4_src_dest_outer.source_address);
- }
- if (is_d_ipv6) {
/* Handle IPv6 destination address */
HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -608,6 +833,10 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_dst_outer.ipv6_address_31_0);
} else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
/* Handle IPv4 destination address */
HWS_SET_HDR(fc, match_param, IPV4_DST_O,
outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
@@ -665,7 +894,7 @@ static int
hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
u32 *match_param)
{
- bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ bool is_ipv6, smac_set, dmac_set, ip_addr_set, ip_ver_set;
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
u32 *s_ipv6, *d_ipv6;
@@ -677,6 +906,20 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
return -EINVAL;
}
+ ip_addr_set = HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.src_ipv4_src_ipv6,
+ 0x80) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ inner_headers.dst_ipv4_dst_ipv6, 0x80);
+ ip_ver_set = HWS_IS_FLD_SET(match_param, inner_headers.ip_version) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.ethertype);
+
+ if (ip_addr_set && !ip_ver_set) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported match on IP address without version or ethertype\n");
+ return -EINVAL;
+ }
+
/* L2 Check ethertype */
HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
inner_headers.ethertype,
@@ -728,10 +971,16 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
/* Assume IPv6 is used if ipv6 bits are set */
- is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
- is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+ is_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2] ||
+ d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
- if (is_s_ipv6) {
+ /* IHL is an IPv4-specific field. */
+ if (is_ipv6 && HWS_IS_FLD_SET(match_param, inner_headers.ipv4_ihl)) {
+ mlx5hws_err(cd->ctx, "Unsupported match on IPv6 address and IPv4 IHL\n");
+ return -EINVAL;
+ }
+
+ if (is_ipv6) {
/* Handle IPv6 source address */
HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -745,13 +994,6 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_src_inner.ipv6_address_31_0);
- } else {
- /* Handle IPv4 source address */
- HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
- ipv4_src_dest_inner.source_address);
- }
- if (is_d_ipv6) {
/* Handle IPv6 destination address */
HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
@@ -766,6 +1008,10 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
ipv6_dst_inner.ipv6_address_31_0);
} else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
/* Handle IPv4 destination address */
HWS_SET_HDR(fc, match_param, IPV4_DST_I,
inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
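
Both hunks above enforce the same rule: IPv4 and IPv6 addresses occupy the same bytes of the match parameter, so an address match is ambiguous unless ip_version or ethertype pins down the layout (and IHL only makes sense for IPv4). A standalone sketch of that validation, with a hypothetical flattened header view:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flattened view of one header set in the match param. */
struct hdr_match {
	unsigned char ip_addr[16]; /* shared IPv4/IPv6 address storage */
	bool ip_version_set;
	bool ethertype_set;
};

static bool addr_is_set(const struct hdr_match *m)
{
	for (int i = 0; i < 16; i++)
		if (m->ip_addr[i])
			return true;
	return false;
}

/* Mirrors the driver's rule: an address match is only meaningful
 * once the IP version (or ethertype) disambiguates the layout.
 */
static int validate(const struct hdr_match *m)
{
	if (addr_is_set(m) && !m->ip_version_set && !m->ethertype_set)
		return -1;
	return 0;
}

int main(void)
{
	struct hdr_match m = { .ip_addr = { [12] = 10 } };

	printf("no version: %d\n", validate(&m));   /* -1: rejected */
	m.ip_version_set = true;
	printf("with version: %d\n", validate(&m)); /*  0: accepted */
	return 0;
}
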
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 5c1a2086efba..62da55389331 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -831,4 +831,6 @@ mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
u32 *match_param,
int *fc_sz);
+const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);
+
#endif /* HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 1b787cd66e6f..9d1c0e4b224a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -1081,13 +1081,8 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5hws_bwc_rule *rule;
int err = 0;
- if (mlx5_fs_cmd_is_fw_term_table(ft)) {
- /* Packet reformat on terminamtion table not supported yet */
- if (fte->act_dests.action.action &
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
- return -EOPNOTSUPP;
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
- }
err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
if (err)
@@ -1362,7 +1357,7 @@ mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
pkt_reformat->fs_hws_action.pr_data = pr_data;
}
- pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
pkt_reformat->fs_hws_action.hws_action = hws_action;
return 0;
@@ -1380,6 +1375,15 @@ static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace
struct mlx5_fs_hws_pr *pr_data;
struct mlx5_fs_pool *pr_pool;
+ if (pkt_reformat->fs_hws_action.fw_reformat_id != 0) {
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+
+ fw_pkt_reformat.id = pkt_reformat->fs_hws_action.fw_reformat_id;
+ mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_dealloc(ns, &fw_pkt_reformat);
+ pkt_reformat->fs_hws_action.fw_reformat_id = 0;
+ }
+
if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
return;
@@ -1499,6 +1503,7 @@ static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
err = -ENOMEM;
goto release_mh;
}
+ mutex_init(&modify_hdr->fs_hws_action.lock);
modify_hdr->fs_hws_action.mh_data = mh_data;
modify_hdr->fs_hws_action.fs_pool = pool;
modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
@@ -1532,6 +1537,58 @@ static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *
modify_hdr->fs_hws_action.mh_data = NULL;
}
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ enum mlx5_flow_namespace_type ns_type = pkt_reformat->ns_type;
+ struct mutex *lock = &pkt_reformat->fs_hws_action.lock;
+ u32 *id = &pkt_reformat->fs_hws_action.fw_reformat_id;
+ struct mlx5_pkt_reformat fw_pkt_reformat = { 0 };
+ struct mlx5_pkt_reformat_params params = { 0 };
+ struct mlx5_flow_root_namespace *ns;
+ struct mlx5_core_dev *dev;
+ int ret;
+
+ mutex_lock(lock);
+
+ if (*id != 0) {
+ *reformat_id = *id;
+ ret = 0;
+ goto unlock;
+ }
+
+ dev = mlx5hws_action_get_dev(pkt_reformat->fs_hws_action.hws_action);
+ if (!dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ns = mlx5_get_root_namespace(dev, ns_type);
+ if (!ns) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ params.type = pkt_reformat->reformat_type;
+ params.size = pkt_reformat->fs_hws_action.pr_data->data_size;
+ params.data = pkt_reformat->fs_hws_action.pr_data->data;
+
+ ret = mlx5_fs_cmd_get_fw_cmds()->
+ packet_reformat_alloc(ns, &params, ns_type, &fw_pkt_reformat);
+ if (ret)
+ goto unlock;
+
+ *id = fw_pkt_reformat.id;
+ *reformat_id = *id;
+ ret = 0;
+
+unlock:
+ mutex_unlock(lock);
+
+ return ret;
+}
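
The helper above lazily allocates the FW reformat ID on first use and caches it under a mutex, so concurrent callers agree on one ID and the FW object is created at most once. The same get-or-create pattern in miniature with pthreads (expensive_alloc is a stand-in for the FW call):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t cached_id; /* 0 means "not allocated yet" */

static uint32_t expensive_alloc(void) { return 42; } /* stub FW call */

static int get_id(uint32_t *out)
{
	pthread_mutex_lock(&lock);
	if (cached_id == 0)
		cached_id = expensive_alloc(); /* first caller pays the cost */
	*out = cached_id;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	uint32_t id;

	get_id(&id);
	printf("id=%u\n", (unsigned)id);
	get_id(&id); /* second call reuses the cached value */
	printf("id=%u\n", (unsigned)id);
	return 0;
}
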
+
static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
index 8b56298288da..b92d55b2d147 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -41,6 +41,11 @@ struct mlx5_fs_hws_action {
struct mlx5_fs_pool *fs_pool;
struct mlx5_fs_hws_pr *pr_data;
struct mlx5_fs_hws_mh *mh_data;
+ u32 fw_reformat_id;
+ /* Protect `fw_reformat_id` against being initialized from multiple
+ * threads.
+ */
+ struct mutex lock;
};
struct mlx5_fs_hws_matcher {
@@ -84,12 +89,23 @@ void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
#ifdef CONFIG_MLX5_HW_STEERING
+int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
+
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
#else
+static inline int
+mlx5_fs_hws_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
+{
+ return -EOPNOTSUPP;
+}
+
static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
{
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
index 30ccd635b505..21279d503117 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -17,6 +17,7 @@
#include "context.h"
#include "table.h"
#include "send.h"
+#include "action_ste_pool.h"
#include "rule.h"
#include "cmd.h"
#include "action.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index b61864b32053..ce28ee1c0e41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -3,25 +3,6 @@
#include "internal.h"
-enum mlx5hws_matcher_rtc_type {
- HWS_MATCHER_RTC_TYPE_MATCH,
- HWS_MATCHER_RTC_TYPE_STE_ARRAY,
- HWS_MATCHER_RTC_TYPE_MAX,
-};
-
-static const char * const mlx5hws_matcher_rtc_type_str[] = {
- [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
- [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
- [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
-};
-
-static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
-{
- if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
- rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
- return mlx5hws_matcher_rtc_type_str[rtc_type];
-}
-
static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
{
/* Collision table concatenation is done only for large rule tables */
@@ -42,19 +23,199 @@ static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
}
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_matcher *tmp_matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return -EINVAL;
+
+	/* Update the isolated_matcher_end_ft_id attribute for all
+	 * the matchers in the isolated table.
+	 */
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node)
+ tmp_matcher->attr.isolated_matcher_end_ft_id = miss_ft_id;
+
+ tmp_matcher = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+ return mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tmp_matcher->end_ft_id,
+ tbl->fw_ft_type,
+ miss_ft_id);
+}
+
+static int hws_matcher_connect_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 end_ft_id;
+ int ret;
+
+ /* Reset end_ft next RTCs */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ /* Connect isolated matcher's end_ft to the complex matcher's end FT */
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ matcher->end_ft_id,
+ matcher->tbl->fw_ft_type,
+ end_ft_id);
+
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
+ return ret;
+ }
+
+ ret = hws_matcher_connect_end_ft_isolated(matcher);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to connect end FT\n");
+ goto destroy_default_ft;
+ }
+
+ return 0;
+
+destroy_default_ft:
+ mlx5hws_table_destroy_default_ft(tbl, matcher->end_ft_id);
+ return ret;
+}
+
static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_table *tbl = matcher->tbl;
int ret;
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+ if (mlx5hws_matcher_is_isolated(matcher))
+ ret = hws_matcher_create_end_ft_isolated(matcher);
+ else
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,
+ &matcher->end_ft_id);
+
if (ret) {
mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
return ret;
}
+
return 0;
}
+static int hws_matcher_connect_isolated_first(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+	/* The isolated matcher's end_ft already points to the end_ft
+	 * of the complex matcher - it was set when the end_ft was
+	 * created, so there is no need to connect it.
+	 * We still need to connect the isolated table's start FT to
+	 * this matcher's RTC.
+	 */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to connect start FT to match RTC\n");
+ return ret;
+ }
+
+ /* Reset table's FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset table ft default miss\n");
+ return ret;
+ }
+
+ list_add(&matcher->list_node, &tbl->matchers_list);
+
+ return ret;
+}
+
+static int hws_matcher_connect_isolated_last(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *last;
+ int ret;
+
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+
+	/* The new matcher's end_ft already points to the end_ft of
+	 * the complex matcher.
+	 * Connect the previous matcher's end_ft to this new matcher's RTC.
+	 */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ last->end_ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Isolated matcher: failed to connect matcher end_ft to new match RTC\n");
+ return ret;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, last->end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Isolated matcher: failed to reset matcher ft default miss\n");
+ return ret;
+ }
+
+ /* Insert after the last matcher */
+ list_add(&matcher->list_node, &last->list_node);
+
+ return 0;
+}
+
+static int hws_matcher_connect_isolated(struct mlx5hws_matcher *matcher)
+{
+	/* An isolated matcher is expected to be the only one in its table.
+	 * However, it can have a collision matcher, and it can go through
+	 * the rehash process, in which case we will temporarily have both
+	 * the old and the new matchers in the isolated table.
+	 * Check if this is the first matcher in the isolated table.
+	 */
+ if (list_empty(&matcher->tbl->matchers_list))
+ return hws_matcher_connect_isolated_first(matcher);
+
+	/* If this wasn't the first matcher, then we have 3 possible cases:
+	 * - this is a collision matcher for the first matcher
+	 * - this is a new rehash destination matcher
+	 * - this is a collision matcher for the new rehash destination matcher
+	 * The logic to add a new matcher is the same for all these cases.
+	 */
+ return hws_matcher_connect_isolated_last(matcher);
+}
+
static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_table *tbl = matcher->tbl;
@@ -64,6 +225,9 @@ static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
struct mlx5hws_matcher *tmp_matcher;
int ret;
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_connect_isolated(matcher);
+
/* Find location in matcher list */
if (list_empty(&tbl->matchers_list)) {
list_add(&matcher->list_node, &tbl->matchers_list);
@@ -140,6 +304,92 @@ remove_from_list:
return ret;
}
+static int hws_matcher_disconnect_isolated(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *first, *last, *prev, *next;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 end_ft_id;
+ int ret;
+
+ first = list_first_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ last = list_last_entry(&tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ prev = list_prev_entry(matcher, list_node);
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (first == last) {
+ /* This was the only matcher in the list.
+ * Reset isolated table FT next RTCs and connect it
+ * to the whole complex matcher end FT instead.
+ */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ 0, 0);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to reset FT's next RTCs\n");
+ return ret;
+ }
+
+ end_ft_id = matcher->attr.isolated_matcher_end_ft_id;
+ ret = mlx5hws_table_ft_set_next_ft(tbl->ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to set FT's miss_ft_id\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* At this point we know that there are more matchers in the list */
+
+ if (matcher == first) {
+		/* We've disconnected the first matcher.
+		 * Now update the isolated table's default FT.
+		 */
+ if (!next)
+ return -EINVAL;
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ tbl->ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ }
+
+ if (matcher == last) {
+ /* If we've disconnected the last matcher - update prev
+ * matcher's end_ft to point to the complex matcher end_ft.
+ */
+ if (!prev)
+ return -EINVAL;
+ return hws_matcher_connect_end_ft_isolated(prev);
+ }
+
+	/* This wasn't the first or the last matcher, which means that it has
+	 * both prev and next matchers. Note that this only happens if we're
+	 * disconnecting the collision matcher of the old matcher during
+	 * rehash.
+	 */
+ if (!prev || !next ||
+ !(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ return -EINVAL;
+
+ /* Update prev end FT to point to next match RTC */
+ return mlx5hws_table_ft_set_next_rtc(ctx,
+ prev->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+}
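
The disconnect logic boils down to four unlink cases on the matcher list: only element, head, tail, and middle, each re-pointing a different piece of the steering chain. A plain doubly-linked-list sketch of the case analysis (the strings stand in for the FT/RTC re-pointing):

#include <stdio.h>

struct matcher {
	struct matcher *prev, *next;
	const char *name;
};

/* Mirrors the disconnect cases: only element, head, tail, middle. */
static const char *unlink_case(struct matcher *m,
			       struct matcher **head, struct matcher **tail)
{
	if (m->prev)
		m->prev->next = m->next;
	else
		*head = m->next;
	if (m->next)
		m->next->prev = m->prev;
	else
		*tail = m->prev;

	if (!m->prev && !m->next)
		return "was the only matcher: repoint table to end FT";
	if (!m->prev)
		return "was first: repoint table FT to next matcher RTC";
	if (!m->next)
		return "was last: repoint prev end_ft to complex end FT";
	return "was middle: repoint prev end_ft to next matcher RTC";
}

int main(void)
{
	struct matcher a = { .name = "a" }, b = { .name = "b" };
	struct matcher *head = &a, *tail = &b;

	a.next = &b;
	b.prev = &a;
	printf("%s\n", unlink_case(&a, &head, &tail));
	printf("%s\n", unlink_case(&b, &head, &tail));
	return 0;
}
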
+
static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher *next = NULL, *prev = NULL;
@@ -147,6 +397,9 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
u32 prev_ft_id = tbl->ft_id;
int ret;
+ if (mlx5hws_matcher_is_isolated(matcher))
+ return hws_matcher_disconnect_isolated(matcher);
+
if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
prev = list_prev_entry(matcher, list_node);
prev_ft_id = prev->end_ft_id;
@@ -197,149 +450,96 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
- enum mlx5hws_matcher_rtc_type rtc_type,
bool is_mirror)
{
- struct mlx5hws_pool_chunk *ste = &matcher->action_ste.ste;
enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
- bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
(flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
/* Optimize FDB RTC */
rtc_attr->log_size = 0;
rtc_attr->log_depth = 0;
- } else {
- /* Keep original values */
- rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
- rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
}
}
-static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type)
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
struct mlx5hws_match_template *mt = matcher->mt;
struct mlx5hws_context *ctx = matcher->tbl->ctx;
- struct mlx5hws_action_default_stc *default_stc;
- struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool *ste_pool, *stc_pool;
- struct mlx5hws_pool_chunk *ste;
- u32 *rtc_0_id, *rtc_1_id;
u32 obj_id;
int ret;
- switch (rtc_type) {
- case HWS_MATCHER_RTC_TYPE_MATCH:
- rtc_0_id = &matcher->match_ste.rtc_0_id;
- rtc_1_id = &matcher->match_ste.rtc_1_id;
- ste_pool = matcher->match_ste.pool;
- ste = &matcher->match_ste.ste;
- ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
-
- rtc_attr.log_size = attr->table.sz_row_log;
- rtc_attr.log_depth = attr->table.sz_col_log;
- rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
- rtc_attr.is_scnd_range = 0;
- rtc_attr.miss_ft_id = matcher->end_ft_id;
-
- if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
- /* The usual Hash Table */
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
-
- /* The first mt is used since all share the same definer */
- rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
- } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
- rtc_attr.num_hash_definer = 1;
-
- if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
- /* Hash Split Table */
- rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
- rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
- } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
- /* Linear Lookup Table */
- rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
- rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
- }
- }
-
- /* Match pool requires implicit allocation */
- ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
- hws_matcher_rtc_type_to_str(rtc_type));
- return ret;
+ rtc_attr.log_size = attr->table.sz_row_log;
+ rtc_attr.log_depth = attr->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode =
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 =
+ mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode ==
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode =
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 =
+ ctx->caps->linear_match_definer;
}
- break;
-
- case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste;
-
- rtc_0_id = &action_ste->rtc_0_id;
- rtc_1_id = &action_ste->rtc_1_id;
- ste_pool = action_ste->pool;
- ste = &action_ste->ste;
- /* Action RTC size calculation:
- * log((max number of rules in matcher) *
- * (max number of action STEs per rule) *
- * (2 to support writing new STEs for update rule))
- */
- ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- attr->table.sz_row_log +
- MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
- rtc_attr.log_size = ste->order;
- rtc_attr.log_depth = 0;
- rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
- /* The action STEs use the default always hit definer */
- rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
- rtc_attr.is_frst_jumbo = false;
- rtc_attr.miss_ft_id = 0;
- break;
-
- default:
- mlx5hws_err(ctx, "HWS Invalid RTC type\n");
- return -EINVAL;
}
- obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_id(matcher->match_ste.pool);
rtc_attr.pd = ctx->pd_num;
rtc_attr.ste_base = obj_id;
- rtc_attr.ste_offset = ste->offset;
rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
- hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool;
- default_stc = ctx->common_res.default_stc;
- obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_id(ctx->stc_pool);
rtc_attr.stc_base = obj_id;
- ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_0_id);
if (ret) {
- mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
- hws_matcher_rtc_type_to_str(rtc_type));
- goto free_ste;
+ mlx5hws_err(ctx, "Failed to create matcher RTC\n");
+ return ret;
}
if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ obj_id = mlx5hws_pool_get_base_mirror_id(
+ matcher->match_ste.pool);
rtc_attr.ste_base = obj_id;
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
- obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ obj_id = mlx5hws_pool_get_base_mirror_id(ctx->stc_pool);
rtc_attr.stc_base = obj_id;
- hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, true);
- ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr,
+ &matcher->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
- hws_matcher_rtc_type_to_str(rtc_type));
+ mlx5hws_err(ctx, "Failed to create mirror matcher RTC\n");
goto destroy_rtc_0;
}
}
@@ -347,46 +547,18 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
return 0;
destroy_rtc_0:
- mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
-free_ste:
- if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
- mlx5hws_pool_chunk_free(ste_pool, ste);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, matcher->match_ste.rtc_0_id);
return ret;
}
-static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type)
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher)
{
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool_chunk *ste;
- struct mlx5hws_pool *ste_pool;
- u32 rtc_0_id, rtc_1_id;
-
- switch (rtc_type) {
- case HWS_MATCHER_RTC_TYPE_MATCH:
- rtc_0_id = matcher->match_ste.rtc_0_id;
- rtc_1_id = matcher->match_ste.rtc_1_id;
- ste_pool = matcher->match_ste.pool;
- ste = &matcher->match_ste.ste;
- break;
- case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste;
- rtc_0_id = action_ste->rtc_0_id;
- rtc_1_id = action_ste->rtc_1_id;
- ste_pool = action_ste->pool;
- ste = &action_ste->ste;
- break;
- default:
- return;
- }
+ struct mlx5_core_dev *mdev = matcher->tbl->ctx->mdev;
- if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
- if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
- mlx5hws_pool_chunk_free(ste_pool, ste);
+ mlx5hws_cmd_rtc_destroy(mdev, matcher->match_ste.rtc_0_id);
}
static int
@@ -454,85 +626,17 @@ static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
return 0;
}
-static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
-{
- struct mlx5hws_matcher_resize_data *resize_data;
-
- resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
- if (!resize_data)
- return -ENOMEM;
-
- resize_data->max_stes = src_matcher->action_ste.max_stes;
-
- resize_data->stc = src_matcher->action_ste.stc;
- resize_data->rtc_0_id = src_matcher->action_ste.rtc_0_id;
- resize_data->rtc_1_id = src_matcher->action_ste.rtc_1_id;
- resize_data->pool = src_matcher->action_ste.max_stes ?
- src_matcher->action_ste.pool : NULL;
-
- /* Place the new resized matcher on the dst matcher's list */
- list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
-
- /* Move all the previous resized matchers to the dst matcher's list */
- while (!list_empty(&src_matcher->resize_data)) {
- resize_data = list_first_entry(&src_matcher->resize_data,
- struct mlx5hws_matcher_resize_data,
- list_node);
- list_del_init(&resize_data->list_node);
- list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
- }
-
- return 0;
-}
-
-static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
-{
- struct mlx5hws_matcher_resize_data *resize_data;
-
- if (!mlx5hws_matcher_is_resizable(matcher))
- return;
-
- while (!list_empty(&matcher->resize_data)) {
- resize_data = list_first_entry(&matcher->resize_data,
- struct mlx5hws_matcher_resize_data,
- list_node);
- list_del_init(&resize_data->list_node);
-
- if (resize_data->max_stes) {
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->stc);
-
- if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->rtc_1_id);
-
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->rtc_0_id);
-
- if (resize_data->pool)
- mlx5hws_pool_destroy(resize_data->pool);
- }
-
- kfree(resize_data);
- }
-}
-
static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
{
bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_pool_attr pool_attr = {0};
- struct mlx5hws_context *ctx = tbl->ctx;
- u32 required_stes;
- u8 max_stes = 0;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 required_stes, max_stes;
int i, ret;
if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
return 0;
+ max_stes = 0;
for (i = 0; i < matcher->num_of_at; i++) {
struct mlx5hws_action_template *at = &matcher->at[i];
@@ -548,75 +652,33 @@ static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
/* Future: Optimize reparse */
}
- /* There are no additional STEs required for matcher */
- if (!max_stes)
- return 0;
-
- matcher->action_ste.max_stes = max_stes;
-
- action_ste = &matcher->action_ste;
-
- /* Allocate action STE mempool */
- pool_attr.table_type = tbl->type;
- pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
- /* Pool size is similar to action RTC size */
- pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- matcher->attr.table.sz_row_log +
- MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
- hws_matcher_set_pool_attr(&pool_attr, matcher);
- action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
- if (!action_ste->pool) {
- mlx5hws_err(ctx, "Failed to create action ste pool\n");
- return -EINVAL;
- }
-
- /* Allocate action RTC */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
- if (ret) {
- mlx5hws_err(ctx, "Failed to create action RTC\n");
- goto free_ste_pool;
- }
-
- /* Allocate STC for jumps to STE */
- stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
- stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
- stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
- stc_attr.ste_table.ste = action_ste->ste;
- stc_attr.ste_table.ste_pool = action_ste->pool;
- stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
-
- ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
- &action_ste->stc);
- if (ret) {
- mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
- goto free_rtc;
- }
+ matcher->num_of_action_stes = max_stes;
return 0;
-
-free_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
-free_ste_pool:
- mlx5hws_pool_destroy(action_ste->pool);
- return ret;
}
-static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+static void hws_matcher_set_ip_version_match(struct mlx5hws_matcher *matcher)
{
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_table *tbl = matcher->tbl;
-
- action_ste = &matcher->action_ste;
-
- if (!action_ste->max_stes ||
- matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
- mlx5hws_matcher_is_in_resize(matcher))
- return;
+ int i;
- mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
- mlx5hws_pool_destroy(action_ste->pool);
+ for (i = 0; i < matcher->mt->fc_sz; i++) {
+ switch (matcher->mt->fc[i].fname) {
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
+ matcher->matches_outer_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
+ matcher->matches_outer_ip_version = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
+ matcher->matches_inner_ethertype = 1;
+ break;
+ case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
+ matcher->matches_inner_ip_version = 1;
+ break;
+ default:
+ break;
+ }
+ }
}
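
The helper above derives boolean matcher flags from the compiled field list with a single pass and a switch. The same derivation in a standalone sketch with hypothetical field names:

#include <stdbool.h>
#include <stdio.h>

enum fname { F_ETH_TYPE_O, F_ETH_L3_TYPE_O, F_IP_TTL_O };

struct flags {
	bool matches_ethertype;
	bool matches_ip_version;
};

/* Derive matcher capabilities from the compiled field list. */
static void set_flags(const enum fname *fields, int n, struct flags *f)
{
	for (int i = 0; i < n; i++) {
		switch (fields[i]) {
		case F_ETH_TYPE_O:
			f->matches_ethertype = true;
			break;
		case F_ETH_L3_TYPE_O:
			f->matches_ip_version = true;
			break;
		default:
			break;
		}
	}
}

int main(void)
{
	enum fname fields[] = { F_IP_TTL_O, F_ETH_L3_TYPE_O };
	struct flags f = { 0 };

	set_flags(fields, 2, &f);
	printf("ethertype=%d ip_version=%d\n",
	       f.matches_ethertype, f.matches_ip_version);
	return 0;
}
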
static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
@@ -635,10 +697,11 @@ static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
}
}
+ hws_matcher_set_ip_version_match(matcher);
+
/* Create an STE pool per matcher*/
pool_attr.table_type = matcher->tbl->type;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
- pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
matcher->attr.table.sz_row_log;
hws_matcher_set_pool_attr(&pool_attr, matcher);
@@ -740,6 +803,8 @@ hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+ matcher->flags |= attr->isolated_matcher_end_ft_id ?
+ MLX5HWS_MATCHER_FLAGS_ISOLATED : 0;
return hws_matcher_check_attr_sz(caps, matcher);
}
@@ -761,10 +826,10 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
/* Create matcher end flow table anchor */
ret = hws_matcher_create_end_ft(matcher);
if (ret)
- goto unbind_at;
+ goto unbind_mt;
/* Allocate the RTC for the new matcher */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
+ ret = hws_matcher_create_rtc(matcher);
if (ret)
goto destroy_end_ft;
@@ -776,11 +841,9 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
return 0;
destroy_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
+ hws_matcher_destroy_rtc(matcher);
destroy_end_ft:
hws_matcher_destroy_end_ft(matcher);
-unbind_at:
- hws_matcher_unbind_at(matcher);
unbind_mt:
hws_matcher_unbind_mt(matcher);
return ret;
@@ -788,11 +851,9 @@ unbind_mt:
static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
{
- hws_matcher_resize_uninit(matcher);
hws_matcher_disconnect(matcher);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
+ hws_matcher_destroy_rtc(matcher);
hws_matcher_destroy_end_ft(matcher);
- hws_matcher_unbind_at(matcher);
hws_matcher_unbind_mt(matcher);
}
@@ -814,8 +875,6 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
if (!col_matcher)
return -ENOMEM;
- INIT_LIST_HEAD(&col_matcher->resize_data);
-
col_matcher->tbl = matcher->tbl;
col_matcher->mt = matcher->mt;
col_matcher->at = matcher->at;
@@ -832,6 +891,8 @@ hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+ col_matcher->attr.isolated_matcher_end_ft_id =
+ matcher->attr.isolated_matcher_end_ft_id;
ret = hws_matcher_process_attr(ctx->caps, col_matcher);
if (ret)
@@ -869,8 +930,6 @@ static int hws_matcher_init(struct mlx5hws_matcher *matcher)
struct mlx5hws_context *ctx = matcher->tbl->ctx;
int ret;
- INIT_LIST_HEAD(&matcher->resize_data);
-
mutex_lock(&ctx->ctrl_lock);
/* Allocate matcher resource and connect to the packet pipe */
@@ -905,18 +964,44 @@ static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
return 0;
}
+static int hws_matcher_grow_at_array(struct mlx5hws_matcher *matcher)
+{
+ void *p;
+
+ if (matcher->size_of_at_array >= MLX5HWS_MATCHER_MAX_AT)
+ return -ENOMEM;
+
+ matcher->size_of_at_array *= 2;
+ p = krealloc(matcher->at,
+ matcher->size_of_at_array * sizeof(*matcher->at),
+ __GFP_ZERO | GFP_KERNEL);
+ if (!p) {
+ matcher->size_of_at_array /= 2;
+ return -ENOMEM;
+ }
+
+ matcher->at = p;
+
+ return 0;
+}
+
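
/* Illustrative userspace sketch (not part of the patch): the same capped,
 * doubling-growth pattern hws_matcher_grow_at_array() uses, written against
 * libc realloc(). All names and the cap value are examples only; assumes the
 * array starts with a non-zero size, as the driver guarantees.
 */
#include <stdlib.h>
#include <string.h>

#define EXAMPLE_MAX_AT 128

struct example_at { int placeholder; };

static int example_grow_at_array(struct example_at **at, size_t *size)
{
	size_t new_size;
	void *p;

	if (*size >= EXAMPLE_MAX_AT)
		return -1;	/* hard cap, like MLX5HWS_MATCHER_MAX_AT */

	new_size = *size * 2;
	p = realloc(*at, new_size * sizeof(**at));
	if (!p)
		return -1;	/* the old array stays intact on failure */

	/* realloc() does not zero the new tail; the kernel code asks
	 * krealloc() for __GFP_ZERO instead.
	 */
	memset((struct example_at *)p + *size, 0,
	       (new_size - *size) * sizeof(**at));

	*at = p;
	*size = new_size;
	return 0;
}
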
int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
struct mlx5hws_action_template *at)
{
bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_context *ctx = matcher->tbl->ctx;
u32 required_stes;
int ret;
- if (!matcher->attr.max_num_of_at_attach) {
- mlx5hws_dbg(ctx, "Num of current at (%d) exceed allowed value\n",
- matcher->num_of_at);
- return -EOPNOTSUPP;
+ if (unlikely(matcher->num_of_at >= matcher->size_of_at_array)) {
+ ret = hws_matcher_grow_at_array(matcher);
+ if (ret)
+ return ret;
+
+ if (matcher->col_matcher) {
+ ret = hws_matcher_grow_at_array(matcher->col_matcher);
+ if (ret)
+ return ret;
+ }
}
ret = hws_matcher_check_and_process_at(matcher, at);
@@ -924,15 +1009,11 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- if (matcher->action_ste.max_stes < required_stes) {
- mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
- required_stes, matcher->action_ste.max_stes);
- return -ENOMEM;
- }
+ if (matcher->num_of_action_stes < required_stes)
+ matcher->num_of_action_stes = required_stes;
matcher->at[matcher->num_of_at] = *at;
matcher->num_of_at += 1;
- matcher->attr.max_num_of_at_attach -= 1;
if (matcher->col_matcher)
matcher->col_matcher->num_of_at = matcher->num_of_at;
@@ -960,8 +1041,9 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
if (!matcher->mt)
return -ENOMEM;
- matcher->at = kvcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
- sizeof(*matcher->at),
+ matcher->size_of_at_array =
+ num_of_at + matcher->attr.max_num_of_at_attach;
+ matcher->at = kvcalloc(matcher->size_of_at_array, sizeof(*matcher->at),
GFP_KERNEL);
if (!matcher->at) {
mlx5hws_err(ctx, "Failed to allocate action template array\n");
@@ -1110,7 +1192,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
return -EINVAL;
}
- if (src_matcher->action_ste.max_stes > dst_matcher->action_ste.max_stes) {
+ if (src_matcher->num_of_action_stes > dst_matcher->num_of_action_stes) {
mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
return -EINVAL;
}
@@ -1139,10 +1221,6 @@ int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
src_matcher->resize_dst = dst_matcher;
- ret = hws_matcher_resize_init(src_matcher);
- if (ret)
- src_matcher->resize_dst = NULL;
-
out:
mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 020de70270c5..32e83cddcd60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -23,6 +23,9 @@
*/
#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1
+/* Maximum number of action templates that can be attached to a matcher. */
+#define MLX5HWS_MATCHER_MAX_AT 128
+
enum mlx5hws_matcher_offset {
MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
@@ -31,6 +34,7 @@ enum mlx5hws_matcher_offset {
enum mlx5hws_matcher_flags {
MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+ MLX5HWS_MATCHER_FLAGS_ISOLATED = 1 << 4,
};
struct mlx5hws_match_template {
@@ -42,28 +46,15 @@ struct mlx5hws_match_template {
};
struct mlx5hws_matcher_match_ste {
- struct mlx5hws_pool_chunk ste;
u32 rtc_0_id;
u32 rtc_1_id;
struct mlx5hws_pool *pool;
};
-struct mlx5hws_matcher_action_ste {
- struct mlx5hws_pool_chunk ste;
- struct mlx5hws_pool_chunk stc;
- u32 rtc_0_id;
- u32 rtc_1_id;
- struct mlx5hws_pool *pool;
- u8 max_stes;
-};
-
-struct mlx5hws_matcher_resize_data {
- struct mlx5hws_pool_chunk stc;
- u32 rtc_0_id;
- u32 rtc_1_id;
- struct mlx5hws_pool *pool;
- u8 max_stes;
- struct list_head list_node;
+enum {
+ MLX5HWS_MATCHER_IPV_UNSET = 0,
+ MLX5HWS_MATCHER_IPV_4 = 1,
+ MLX5HWS_MATCHER_IPV_6 = 2,
};
struct mlx5hws_matcher {
@@ -72,16 +63,22 @@ struct mlx5hws_matcher {
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at;
u8 num_of_at;
+ u8 size_of_at_array;
u8 num_of_mt;
+ u8 num_of_action_stes;
/* enum mlx5hws_matcher_flags */
u8 flags;
+ u8 matches_outer_ethertype:1;
+ u8 matches_outer_ip_version:1;
+ u8 matches_inner_ethertype:1;
+ u8 matches_inner_ip_version:1;
+ u8 outer_ip_version:2;
+ u8 inner_ip_version:2;
u32 end_ft_id;
struct mlx5hws_matcher *col_matcher;
struct mlx5hws_matcher *resize_dst;
struct mlx5hws_matcher_match_ste match_ste;
- struct mlx5hws_matcher_action_ste action_ste;
struct list_head list_node;
- struct list_head resize_data;
};
static inline bool
@@ -100,9 +97,17 @@ static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
return !!matcher->resize_dst;
}
+static inline bool mlx5hws_matcher_is_isolated(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED);
+}
+
static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
{
return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
}
+int mlx5hws_matcher_update_end_ft_isolated(struct mlx5hws_table *tbl,
+ u32 miss_ft_id);
+
#endif /* HWS_MATCHER_H_ */
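
/* Illustrative sketch (not part of the patch): the bitfield packing used by
 * struct mlx5hws_matcher above -- four one-bit "matches on X" flags plus two
 * 2-bit tri-state IP versions total eight bits, so they fit in a single byte.
 * Standalone C; the struct name is an example.
 */
#include <stdio.h>

struct example_matcher_bits {
	unsigned char matches_outer_ethertype:1;
	unsigned char matches_outer_ip_version:1;
	unsigned char matches_inner_ethertype:1;
	unsigned char matches_inner_ip_version:1;
	unsigned char outer_ip_version:2;	/* 0 = unset, 1 = v4, 2 = v6 */
	unsigned char inner_ip_version:2;
};

int main(void)
{
	/* Typically prints 1: all six fields share one byte. */
	printf("%zu\n", sizeof(struct example_matcher_bits));
	return 0;
}
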
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index 5121951f2778..9bbadc4d8a0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -119,6 +119,8 @@ struct mlx5hws_matcher_attr {
};
/* Optional AT attach configuration - Max number of additional AT */
u8 max_num_of_at_attach;
+ /* Optional end FT (miss FT ID) for match RTC (for isolated matcher) */
+ u32 isolated_matcher_end_ft_id;
};
struct mlx5hws_rule_attr {
@@ -502,6 +504,15 @@ enum mlx5hws_action_type
mlx5hws_action_get_type(struct mlx5hws_action *action);
/**
+ * mlx5hws_action_get_dev - Get mlx5 core device.
+ *
+ * @action: The action to get the device from.
+ *
+ * Return: mlx5 core device.
+ */
+struct mlx5_core_dev *mlx5hws_action_get_dev(struct mlx5hws_action *action);
+
+/**
* mlx5hws_action_create_dest_drop - Create a direct rule drop action.
*
* @ctx: The context in which the new action will be created.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index f51ed24526b9..51e4c551e0ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -490,8 +490,8 @@ hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
switch (action_type) {
case MLX5_ACTION_TYPE_SET:
case MLX5_ACTION_TYPE_ADD:
- *src_field = MLX5_GET(set_action_in, pattern, field);
- *dst_field = INVALID_FIELD;
+ *src_field = INVALID_FIELD;
+ *dst_field = MLX5_GET(set_action_in, pattern, field);
break;
case MLX5_ACTION_TYPE_COPY:
*src_field = MLX5_GET(copy_action_in, pattern, src_field);
@@ -522,57 +522,59 @@ bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], s
return true;
}
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
- size_t max_actions, size_t *new_size,
- u32 *nope_location, __be64 *new_pat)
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat)
{
- u16 prev_src_field = 0, prev_dst_field = 0;
+ u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
u16 src_field, dst_field;
u8 action_type;
+ bool dependent;
size_t i, j;
*new_size = num_actions;
- *nope_location = 0;
+ *nop_locations = 0;
if (num_actions == 1)
- return;
+ return 0;
for (i = 0, j = 0; i < num_actions; i++, j++) {
- action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+ if (j >= max_actions)
+ return -EINVAL;
+		action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
hws_action_modify_get_target_fields(action_type, &pattern[i],
&src_field, &dst_field);
- if (i % 2) {
- if (action_type == MLX5_ACTION_TYPE_COPY &&
- (prev_src_field == src_field ||
- prev_dst_field == dst_field)) {
- /* need Nope */
- *new_size += 1;
- *nope_location |= BIT(i);
- memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
- MLX5_SET(set_action_in, &new_pat[j],
- action_type,
- MLX5_MODIFICATION_TYPE_NOP);
- j++;
- } else if (prev_src_field == src_field) {
- /* need Nope*/
- *new_size += 1;
- *nope_location |= BIT(i);
- MLX5_SET(set_action_in, &new_pat[j],
- action_type,
- MLX5_MODIFICATION_TYPE_NOP);
- j++;
- }
- }
- memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
- /* check if no more space */
- if (j > max_actions) {
- *new_size = num_actions;
- *nope_location = 0;
- return;
+
+ /* For every action, look at it and the previous one. The two
+ * actions are dependent if:
+ */
+ dependent =
+ (i > 0) &&
+ /* At least one of the actions is a write and */
+ (dst_field != INVALID_FIELD ||
+ prev_dst_field != INVALID_FIELD) &&
+			/* One's destination is the other's source */
+ (dst_field == prev_src_field ||
+ src_field == prev_dst_field ||
+ /* Or both write to the same destination */
+ dst_field == prev_dst_field);
+
+ if (dependent) {
+ *new_size += 1;
+ *nop_locations |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j], action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ if (j >= max_actions)
+ return -EINVAL;
}
+ memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
prev_src_field = src_field;
prev_dst_field = dst_field;
}
+
+ return 0;
}
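
/* Illustrative sketch (not part of the patch): the dependency test the
 * rewritten mlx5hws_pat_calc_nop() applies, on a toy two-field action model.
 * The INVALID_FIELD sentinel and all names are examples only.
 */
#include <stdbool.h>

#define EXAMPLE_INVALID_FIELD 0xffff

struct example_action {
	unsigned short src_field;	/* EXAMPLE_INVALID_FIELD if none */
	unsigned short dst_field;	/* EXAMPLE_INVALID_FIELD if none */
};

static bool example_dependent(const struct example_action *prev,
			      const struct example_action *cur)
{
	/* At least one of the two actions must write somewhere... */
	if (cur->dst_field == EXAMPLE_INVALID_FIELD &&
	    prev->dst_field == EXAMPLE_INVALID_FIELD)
		return false;

	/* ...and one's destination must be the other's source, or both
	 * must write the same destination. A NOP goes between such pairs.
	 */
	return cur->dst_field == prev->src_field ||
	       cur->src_field == prev->dst_field ||
	       cur->dst_field == prev->dst_field;
}
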
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
index 8ddb51980044..7fbd8dc7aa18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
@@ -96,6 +96,7 @@ int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
u8 *arg_data,
size_t data_size);
-void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
- size_t *new_size, u32 *nope_location, __be64 *new_pat);
+int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nop_locations, __be64 *new_pat);
#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index 50a81d360bb2..7e37d6e9eb83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -20,15 +20,14 @@ static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
kfree(resource);
}
-static void hws_pool_resource_free(struct mlx5hws_pool *pool,
- int resource_idx)
+static void hws_pool_resource_free(struct mlx5hws_pool *pool)
{
- hws_pool_free_one_resource(pool->resource[resource_idx]);
- pool->resource[resource_idx] = NULL;
+ hws_pool_free_one_resource(pool->resource);
+ pool->resource = NULL;
if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
- hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
- pool->mirror_resource[resource_idx] = NULL;
+ hws_pool_free_one_resource(pool->mirror_resource);
+ pool->mirror_resource = NULL;
}
}
@@ -61,10 +60,8 @@ hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
ret = -EINVAL;
}
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+ if (ret)
goto free_resource;
- }
resource->pool = pool;
resource->range = 1 << log_range;
@@ -77,199 +74,114 @@ free_resource:
return NULL;
}
-static int
-hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+static int hws_pool_resource_alloc(struct mlx5hws_pool *pool)
{
struct mlx5hws_pool_resource *resource;
u32 fw_ft_type, opt_log_range;
fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
- opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ?
+ 0 : pool->alloc_log_sz;
resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
if (!resource) {
- mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+ mlx5hws_err(pool->ctx, "Failed to allocate resource\n");
return -EINVAL;
}
- pool->resource[idx] = resource;
+ pool->resource = resource;
if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
struct mlx5hws_pool_resource *mirror_resource;
fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
- opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ?
+ 0 : pool->alloc_log_sz;
mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
if (!mirror_resource) {
- mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+ mlx5hws_err(pool->ctx, "Failed to allocate mirrored resource\n");
hws_pool_free_one_resource(resource);
- pool->resource[idx] = NULL;
+ pool->resource = NULL;
return -EINVAL;
}
- pool->mirror_resource[idx] = mirror_resource;
+ pool->mirror_resource = mirror_resource;
}
return 0;
}
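
/* Illustrative sketch (not part of the patch): the FDB sizing optimization
 * applied above -- a pool optimized for one direction allocates the other
 * direction's resource with log range 0, i.e. a single-entry placeholder.
 * Standalone C; all names are examples.
 */
enum example_opt { EXAMPLE_OPT_NONE, EXAMPLE_OPT_ORIG, EXAMPLE_OPT_MIRROR };

static unsigned int example_orig_log_range(enum example_opt opt,
					   unsigned int alloc_log_sz)
{
	/* Mirror-only pools shrink the original-direction resource. */
	return opt == EXAMPLE_OPT_MIRROR ? 0 : alloc_log_sz;
}

static unsigned int example_mirror_log_range(enum example_opt opt,
					     unsigned int alloc_log_sz)
{
	/* Original-only pools shrink the mirror-direction resource. */
	return opt == EXAMPLE_OPT_ORIG ? 0 : alloc_log_sz;
}
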
-static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
-{
- unsigned long *cur_bmp;
-
- cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
- if (!cur_bmp)
- return NULL;
-
- bitmap_fill(cur_bmp, 1 << log_range);
-
- return cur_bmp;
-}
-
-static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static int hws_pool_buddy_init(struct mlx5hws_pool *pool)
{
struct mlx5hws_buddy_mem *buddy;
- buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+ buddy = mlx5hws_buddy_create(pool->alloc_log_sz);
if (!buddy) {
- mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
- return;
- }
-
- mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
-}
-
-static struct mlx5hws_buddy_mem *
-hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
- u32 order, bool *is_new_buddy)
-{
- static struct mlx5hws_buddy_mem *buddy;
- u32 new_buddy_size;
-
- buddy = pool->db.buddy_manager->buddies[idx];
- if (buddy)
- return buddy;
-
- new_buddy_size = max(pool->alloc_log_sz, order);
- *is_new_buddy = true;
- buddy = mlx5hws_buddy_create(new_buddy_size);
- if (!buddy) {
- mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
- new_buddy_size, idx);
- return NULL;
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %zu\n",
+ pool->alloc_log_sz);
+ return -ENOMEM;
}
- if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, new_buddy_size, idx);
+ if (hws_pool_resource_alloc(pool) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
+ pool->type, pool->alloc_log_sz);
mlx5hws_buddy_cleanup(buddy);
- return NULL;
+ return -ENOMEM;
}
- pool->db.buddy_manager->buddies[idx] = buddy;
+ pool->db.buddy = buddy;
- return buddy;
+ return 0;
}
-static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
- int order,
- u32 *buddy_idx,
- int *seg)
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- struct mlx5hws_buddy_mem *buddy;
- bool new_mem = false;
- int ret = 0;
- int i;
-
- *seg = -1;
-
- /* Find the next free place from the buddy array */
- while (*seg < 0) {
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- buddy = hws_pool_buddy_get_next_buddy(pool, i,
- order,
- &new_mem);
- if (!buddy) {
- ret = -ENOMEM;
- goto out;
- }
-
- *seg = mlx5hws_buddy_alloc_mem(buddy, order);
- if (*seg >= 0)
- goto found;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
- mlx5hws_err(pool->ctx,
- "Fail to allocate seg for one resource pool\n");
- ret = -ENOMEM;
- goto out;
- }
-
- if (new_mem) {
- /* We have new memory pool, should be place for us */
- mlx5hws_err(pool->ctx,
- "No memory for order: %d with buddy no: %d\n",
- order, i);
- ret = -ENOMEM;
- goto out;
- }
- }
+ struct mlx5hws_buddy_mem *buddy = pool->db.buddy;
+
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return -EINVAL;
}
-found:
- *buddy_idx = i;
-out:
- return ret;
+ chunk->offset = mlx5hws_buddy_alloc_mem(buddy, chunk->order);
+ if (chunk->offset >= 0)
+ return 0;
+
+ return -ENOMEM;
}
-static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- int ret = 0;
+ struct mlx5hws_buddy_mem *buddy;
- /* Go over the buddies and find next free slot */
- ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
+ buddy = pool->db.buddy;
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Bad buddy state\n");
+ return;
+ }
- return ret;
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
}
static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
{
struct mlx5hws_buddy_mem *buddy;
- int i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- buddy = pool->db.buddy_manager->buddies[i];
- if (buddy) {
- mlx5hws_buddy_cleanup(buddy);
- kfree(buddy);
- pool->db.buddy_manager->buddies[i] = NULL;
- }
- }
- kfree(pool->db.buddy_manager);
+ buddy = pool->db.buddy;
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy = NULL;
+ }
}
-static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool)
{
- pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
- if (!pool->db.buddy_manager)
- return -ENOMEM;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
- bool new_buddy;
+ int ret;
- if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
- mlx5hws_err(pool->ctx,
- "Failed allocating memory on create log_sz: %d\n", log_range);
- kfree(pool->db.buddy_manager);
- return -ENOMEM;
- }
- }
+ ret = hws_pool_buddy_init(pool);
+ if (ret)
+ return ret;
pool->p_db_uninit = &hws_pool_buddy_db_uninit;
pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
@@ -278,261 +190,105 @@ static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
return 0;
}
-static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
- u32 alloc_size, int idx)
-{
- int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
-
- if (ret) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- return ret;
- }
-
- return 0;
-}
-
-static struct mlx5hws_pool_elements *
-hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
{
- struct mlx5hws_pool_elements *elem;
- u32 alloc_size;
-
- alloc_size = pool->alloc_log_sz;
+ unsigned long *bitmap;
- elem = kzalloc(sizeof(*elem), GFP_KERNEL);
- if (!elem)
+ bitmap = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!bitmap)
return NULL;
- /* Sharing the same resource, also means that all the elements are with size 1 */
- if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
- !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
- /* Currently all chunks in size 1 */
- elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
- if (!elem->bitmap) {
- mlx5hws_err(pool->ctx,
- "Failed to create bitmap type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- goto free_elem;
- }
-
- elem->log_size = alloc_size - order;
- }
-
- if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
- mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
- pool->type, alloc_size, idx);
- goto free_db;
- }
-
- pool->db.element_manager->elements[idx] = elem;
-
- return elem;
+ bitmap_fill(bitmap, 1 << log_range);
-free_db:
- bitmap_free(elem->bitmap);
-free_elem:
- kfree(elem);
- return NULL;
+ return bitmap;
}
-static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+static int hws_pool_bitmap_init(struct mlx5hws_pool *pool)
{
- unsigned int segment, size;
-
- size = 1 << elem->log_size;
+ unsigned long *bitmap;
- segment = find_first_bit(elem->bitmap, size);
- if (segment >= size) {
- elem->is_full = true;
+ bitmap = hws_pool_create_and_init_bitmap(pool->alloc_log_sz);
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Failed to create bitmap order: %zu\n",
+ pool->alloc_log_sz);
return -ENOMEM;
}
- bitmap_clear(elem->bitmap, segment, 1);
- *seg = segment;
- return 0;
-}
-
-static int
-hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
- u32 *idx, int *seg)
-{
- struct mlx5hws_pool_elements *elem;
-
- elem = pool->db.element_manager->elements[0];
- if (!elem)
- elem = hws_pool_element_create_new_elem(pool, order, 0);
- if (!elem)
- goto err_no_elem;
-
- if (hws_pool_element_find_seg(elem, seg) != 0) {
- mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ if (hws_pool_resource_alloc(pool) != 0) {
+		mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
+ pool->type, pool->alloc_log_sz);
+ bitmap_free(bitmap);
return -ENOMEM;
}
- *idx = 0;
- elem->num_of_elements++;
- return 0;
+ pool->db.bitmap = bitmap;
-err_no_elem:
- mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
- return -ENOMEM;
-}
-
-static int
-hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
- u32 *idx, int *seg)
-{
- int ret, i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- if (!pool->resource[i]) {
- ret = hws_pool_create_resource_on_index(pool, order, i);
- if (ret)
- goto err_no_res;
- *idx = i;
- *seg = 0; /* One memory slot in that element */
- return 0;
- }
- }
-
- mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
- return -ENOMEM;
-
-err_no_res:
- mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
- return -ENOMEM;
+ return 0;
}
-static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static int hws_pool_bitmap_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- int ret;
-
- /* Go over all memory elements and find/allocate free slot */
- ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
+ unsigned long *bitmap, size;
- return ret;
-}
+ if (chunk->order != 0) {
+ mlx5hws_err(pool->ctx, "Pool only supports order 0 allocs\n");
+ return -EINVAL;
+ }
-static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
-{
- if (unlikely(!pool->resource[chunk->resource_idx]))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
+ return -EINVAL;
+ }
- if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
- hws_pool_resource_free(pool, chunk->resource_idx);
-}
+ size = 1 << pool->alloc_log_sz;
-static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
-{
- (void)pool;
-}
+ chunk->offset = find_first_bit(bitmap, size);
+ if (chunk->offset >= size)
+ return -ENOMEM;
-/* This memory management works as follows:
- * - At start, no memory is allocated at all.
- * - When a new chunk request arrives:
- *   allocate a resource and hand it out.
- * - When that chunk is freed:
- *   the resource is freed as well.
- */
-static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
-{
- pool->p_db_uninit = &hws_pool_general_element_db_uninit;
- pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
- pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+ bitmap_clear(bitmap, chunk->offset, 1);
return 0;
}
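
/* Illustrative userspace sketch (not part of the patch): an order-0 bitmap
 * allocator with the same convention as the code above -- a set bit means
 * "free". Uses GCC builtins in place of find_first_bit()/bitmap_clear();
 * the 64-entry size and all names are examples.
 */
static unsigned long example_bitmap = ~0UL;	/* all 64 entries free */

static int example_bitmap_alloc(void)
{
	int bit;

	if (!example_bitmap)
		return -1;			/* pool exhausted */

	bit = __builtin_ctzl(example_bitmap);	/* find_first_bit() analogue */
	example_bitmap &= ~(1UL << bit);	/* bitmap_clear() analogue */
	return bit;
}

static void example_bitmap_free(int bit)
{
	example_bitmap |= 1UL << bit;		/* bitmap_set() analogue */
}
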
-static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_elements *elem,
- struct mlx5hws_pool_chunk *chunk)
-{
- if (unlikely(!pool->resource[chunk->resource_idx]))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- hws_pool_resource_free(pool, chunk->resource_idx);
- kfree(elem);
- pool->db.element_manager->elements[chunk->resource_idx] = NULL;
-}
-
-static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_bitmap_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
{
- struct mlx5hws_pool_elements *elem;
+ unsigned long *bitmap;
- if (unlikely(chunk->resource_idx))
- pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
-
- elem = pool->db.element_manager->elements[chunk->resource_idx];
- if (!elem) {
- mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+ bitmap = pool->db.bitmap;
+ if (!bitmap) {
+ mlx5hws_err(pool->ctx, "Bad bitmap state\n");
return;
}
- bitmap_set(elem->bitmap, chunk->offset, 1);
- elem->is_full = false;
- elem->num_of_elements--;
-
- if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
- !elem->num_of_elements)
- hws_onesize_element_db_destroy_element(pool, elem, chunk);
+ bitmap_set(bitmap, chunk->offset, 1);
}
-static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static void hws_pool_bitmap_db_uninit(struct mlx5hws_pool *pool)
{
- int ret = 0;
+ unsigned long *bitmap;
- /* Go over all memory elements and find/allocate free slot */
- ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
- &chunk->resource_idx,
- &chunk->offset);
- if (ret)
- mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
- chunk->order);
-
- return ret;
-}
-
-static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
-{
- struct mlx5hws_pool_elements *elem;
- int i;
-
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
- elem = pool->db.element_manager->elements[i];
- if (elem) {
- bitmap_free(elem->bitmap);
- kfree(elem);
- pool->db.element_manager->elements[i] = NULL;
- }
+ bitmap = pool->db.bitmap;
+ if (bitmap) {
+ bitmap_free(bitmap);
+ pool->db.bitmap = NULL;
}
- kfree(pool->db.element_manager);
}
-/* This memory management works as follows:
- * - At start, no memory is allocated at all.
- * - When a new chunk request arrives:
- *   allocate the first and only slot of memory/resource;
- *   once it is exhausted, return an error.
- */
-static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+static int hws_pool_bitmap_db_init(struct mlx5hws_pool *pool)
{
- pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
- if (!pool->db.element_manager)
- return -ENOMEM;
+ int ret;
- pool->p_db_uninit = &hws_onesize_element_db_uninit;
- pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
- pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+ ret = hws_pool_bitmap_init(pool);
+ if (ret)
+ return ret;
+
+ pool->p_db_uninit = &hws_pool_bitmap_db_uninit;
+ pool->p_get_chunk = &hws_pool_bitmap_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_bitmap_db_put_chunk;
return 0;
}
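
/* Illustrative sketch (not part of the patch): the two-backend dispatch the
 * pool keeps after this refactor. The function pointers are chosen once at
 * init time: bitmap for order-0-only pools, buddy otherwise. Standalone C;
 * all names are examples and the backends are stubbed.
 */
struct example_pool;

struct example_db_ops {
	int (*get_chunk)(struct example_pool *pool, int order);
};

static int example_bitmap_get_chunk(struct example_pool *pool, int order)
{
	(void)pool;
	return order == 0 ? 0 : -1;	/* bitmap backend: order 0 only */
}

static int example_buddy_get_chunk(struct example_pool *pool, int order)
{
	(void)pool;
	(void)order;
	return 0;			/* buddy backend: any order */
}

static const struct example_db_ops example_bitmap_ops = {
	.get_chunk = example_bitmap_get_chunk,
};

static const struct example_db_ops example_buddy_ops = {
	.get_chunk = example_buddy_get_chunk,
};

struct example_pool {
	const struct example_db_ops *ops;	/* like p_get_chunk etc. */
};

static void example_pool_db_init(struct example_pool *pool, int use_buddy)
{
	pool->ops = use_buddy ? &example_buddy_ops : &example_bitmap_ops;
}
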
@@ -542,15 +298,14 @@ static int hws_pool_db_init(struct mlx5hws_pool *pool,
{
int ret;
- if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
- ret = hws_pool_general_element_db_init(pool);
- else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
- ret = hws_pool_onesize_element_db_init(pool);
+ if (db_type == MLX5HWS_POOL_DB_TYPE_BITMAP)
+ ret = hws_pool_bitmap_db_init(pool);
else
- ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+ ret = hws_pool_buddy_db_init(pool);
if (ret) {
- mlx5hws_err(pool->ctx, "Failed to init general db : %d (ret: %d)\n", db_type, ret);
+ mlx5hws_err(pool->ctx, "Failed to init pool type: %d (ret: %d)\n",
+ db_type, ret);
return ret;
}
@@ -569,6 +324,8 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
mutex_lock(&pool->lock);
ret = pool->p_get_chunk(pool, chunk);
+ if (ret == 0)
+ pool->available_elems -= 1 << chunk->order;
mutex_unlock(&pool->lock);
return ret;
@@ -579,6 +336,7 @@ void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
{
mutex_lock(&pool->lock);
pool->p_put_chunk(pool, chunk);
+ pool->available_elems += 1 << chunk->order;
mutex_unlock(&pool->lock);
}
@@ -599,17 +357,13 @@ mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_
pool->tbl_type = pool_attr->table_type;
pool->opt_type = pool_attr->opt_type;
- /* Support general db */
- if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
- res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
- else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
- res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
- else
+ if (pool->flags & MLX5HWS_POOL_FLAG_BUDDY)
res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BITMAP;
pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->available_elems = 1 << pool_attr->alloc_log_sz;
if (hws_pool_db_init(pool, res_db_type))
goto free_pool;
@@ -623,18 +377,17 @@ free_pool:
return NULL;
}
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
{
- int i;
-
mutex_destroy(&pool->lock);
- for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
- if (pool->resource[i])
- hws_pool_resource_free(pool, i);
+ if (pool->available_elems != 1 << pool->alloc_log_sz)
+ mlx5hws_err(pool->ctx, "Attempting to destroy non-empty pool\n");
+
+ if (pool->resource)
+ hws_pool_resource_free(pool);
hws_pool_db_unint(pool);
kfree(pool);
- return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
index 621298b352b2..33e33d5f1fb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h
@@ -6,16 +6,12 @@
#define MLX5HWS_POOL_STC_LOG_SZ 15
-#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
-
enum mlx5hws_pool_type {
MLX5HWS_POOL_TYPE_STE,
MLX5HWS_POOL_TYPE_STC,
};
struct mlx5hws_pool_chunk {
- u32 resource_idx;
- /* Internal offset, relative to base index */
int offset;
int order;
};
@@ -27,35 +23,17 @@ struct mlx5hws_pool_resource {
};
enum mlx5hws_pool_flags {
- /* Only a one resource in that pool */
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
- MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
- /* No sharing resources between chunks */
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
- /* All objects are in the same size */
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
- /* Managed by buddy allocator */
- MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
- /* Allocate pool_type memory on pool creation */
- MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
-
- /* These values should be used by the caller */
- MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
- MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
- MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
- MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
- MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
- MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
- MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
- MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+	/* Managed by a buddy allocator. If this is not set, only allocations
+	 * of order 0 are supported.
+	 */
+ MLX5HWS_POOL_FLAG_BUDDY = BIT(0),
};
enum mlx5hws_pool_optimize {
MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+ MLX5HWS_POOL_OPTIMIZE_MAX = 0x3,
};
struct mlx5hws_pool_attr {
@@ -68,34 +46,17 @@ struct mlx5hws_pool_attr {
};
enum mlx5hws_db_type {
- /* Uses for allocating chunk of big memory, each element has its own resource in the FW*/
- MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
- /* One resource only, all the elements are with same one size */
- MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
- /* Many resources, the memory allocated with buddy mechanism */
+	/* Uses a bitmap; supports only allocations of order 0. */
+ MLX5HWS_POOL_DB_TYPE_BITMAP,
+ /* Entries are managed using a buddy mechanism. */
MLX5HWS_POOL_DB_TYPE_BUDDY,
};
-struct mlx5hws_buddy_manager {
- struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
-struct mlx5hws_pool_elements {
- u32 num_of_elements;
- unsigned long *bitmap;
- u32 log_size;
- bool is_full;
-};
-
-struct mlx5hws_element_manager {
- struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
-};
-
struct mlx5hws_pool_db {
enum mlx5hws_db_type type;
union {
- struct mlx5hws_element_manager *element_manager;
- struct mlx5hws_buddy_manager *buddy_manager;
+ unsigned long *bitmap;
+ struct mlx5hws_buddy_mem *buddy;
};
};
@@ -111,11 +72,11 @@ struct mlx5hws_pool {
enum mlx5hws_pool_flags flags;
struct mutex lock; /* protect the pool */
size_t alloc_log_sz;
+ size_t available_elems;
enum mlx5hws_table_type tbl_type;
enum mlx5hws_pool_optimize opt_type;
- struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
- struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
- /* DB */
+ struct mlx5hws_pool_resource *resource;
+ struct mlx5hws_pool_resource *mirror_resource;
struct mlx5hws_pool_db db;
/* Functions */
mlx5hws_pool_unint_db p_db_uninit;
@@ -127,7 +88,7 @@ struct mlx5hws_pool *
mlx5hws_pool_create(struct mlx5hws_context *ctx,
struct mlx5hws_pool_attr *pool_attr);
-int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+void mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
struct mlx5hws_pool_chunk *chunk);
@@ -135,17 +96,37 @@ int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
struct mlx5hws_pool_chunk *chunk);
-static inline u32
-mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static inline u32 mlx5hws_pool_get_base_id(struct mlx5hws_pool *pool)
{
- return pool->resource[chunk->resource_idx]->base_id;
+ return pool->resource->base_id;
}
-static inline u32
-mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
- struct mlx5hws_pool_chunk *chunk)
+static inline u32 mlx5hws_pool_get_base_mirror_id(struct mlx5hws_pool *pool)
{
- return pool->mirror_resource[chunk->resource_idx]->base_id;
+ return pool->mirror_resource->base_id;
+}
+
+static inline bool
+mlx5hws_pool_empty(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == 0;
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+static inline bool
+mlx5hws_pool_full(struct mlx5hws_pool *pool)
+{
+ bool ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->available_elems == (1 << pool->alloc_log_sz);
+ mutex_unlock(&pool->lock);
+
+ return ret;
}
#endif /* MLX5HWS_POOL_H_ */
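
/* Illustrative sketch (not part of the patch): how the available_elems
 * accounting ties together. Allocation subtracts 1 << order, freeing adds it
 * back, so "full" means nothing is outstanding and destroy can warn about
 * leaked chunks. Standalone C without the mutex; all names are examples.
 */
#include <assert.h>

struct example_pool_acct {
	unsigned long available;
	unsigned long capacity;	/* 1 << alloc_log_sz */
};

static void example_acct_alloc(struct example_pool_acct *p, int order)
{
	p->available -= 1UL << order;
}

static void example_acct_free(struct example_pool_acct *p, int order)
{
	p->available += 1UL << order;
}

int main(void)
{
	struct example_pool_acct p = {
		.available = 1UL << 10,
		.capacity = 1UL << 10,
	};

	example_acct_alloc(&p, 3);		/* take a chunk of eight */
	assert(p.available == p.capacity - 8);

	example_acct_free(&p, 3);
	assert(p.available == p.capacity);	/* "full": safe to destroy */
	return 0;
}
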
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index a27a2d5ffc7b..5342a4cc7194 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -195,44 +195,30 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule,
}
}
-static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule)
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+				     u16 queue_id, bool skip_rx,
+				     bool skip_tx)
{
struct mlx5hws_matcher *matcher = rule->matcher;
- struct mlx5hws_matcher_action_ste *action_ste;
- struct mlx5hws_pool_chunk ste = {0};
- int ret;
-
- action_ste = &matcher->action_ste;
- ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
- ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
- if (unlikely(ret)) {
- mlx5hws_err(matcher->tbl->ctx,
- "Failed to allocate STE for rule actions");
- return ret;
- }
-
- rule->action_ste.pool = matcher->action_ste.pool;
- rule->action_ste.num_stes = matcher->action_ste.max_stes;
- rule->action_ste.index = ste.offset;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
- return 0;
+ rule->action_ste.ste.order =
+ ilog2(roundup_pow_of_two(matcher->num_of_action_stes));
+ return mlx5hws_action_ste_chunk_alloc(&ctx->action_ste_pool[queue_id],
+ skip_rx, skip_tx,
+ &rule->action_ste);
}
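
/* Illustrative sketch (not part of the patch): the order computation used
 * when sizing the action STE chunk above -- round the STE count up to a
 * power of two, then take log2. Uses a GCC builtin in place of the kernel's
 * ilog2()/roundup_pow_of_two(); the function name is an example.
 */
static unsigned int example_ste_order(unsigned int num_stes)
{
	unsigned int pow2 = 1;

	while (pow2 < num_stes)			/* roundup_pow_of_two() */
		pow2 <<= 1;

	return __builtin_ctz(pow2);		/* ilog2() of a power of two */
}

/* example_ste_order(5) == 3: five STEs need a chunk of eight (order 3). */
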
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste)
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste)
{
- struct mlx5hws_pool_chunk ste = {0};
-
- if (!action_ste->num_stes)
+ if (!action_ste->action_tbl)
return;
- ste.order = ilog2(roundup_pow_of_two(action_ste->num_stes));
- ste.offset = action_ste->index;
-
/* This release is safe only when the rule match STE was deleted
* (when the rule is being deleted) or replaced with the new STE that
* isn't pointing to old action STEs (when the rule is being updated).
*/
- mlx5hws_pool_chunk_free(action_ste->pool, &ste);
+ mlx5hws_action_ste_chunk_free(action_ste);
}
static void hws_rule_create_init(struct mlx5hws_rule *rule,
@@ -250,22 +236,15 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
rule->rtc_0 = 0;
rule->rtc_1 = 0;
- rule->action_ste.pool = NULL;
- rule->action_ste.num_stes = 0;
- rule->action_ste.index = -1;
-
rule->status = MLX5HWS_RULE_STATUS_CREATING;
} else {
rule->status = MLX5HWS_RULE_STATUS_UPDATING;
+ /* Save the old action STE info so we can free it after writing
+ * new action STEs and a corresponding match STE.
+ */
+ rule->old_action_ste = rule->action_ste;
}
- /* Initialize the old action STE info - shallow-copy action_ste.
- * In create flow this will set old_action_ste fields to initial values.
- * In update flow this will save the existing action STE info,
- * so that we will later use it to free old STEs.
- */
- rule->old_action_ste = rule->action_ste;
-
rule->pending_wqes = 0;
/* Init default send STE attributes */
@@ -277,7 +256,6 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* Init default action apply */
apply->tbl_type = tbl->type;
apply->common_res = &ctx->common_res;
- apply->jump_to_action_stc = matcher->action_ste.stc.offset;
apply->require_dep = 0;
}
@@ -353,17 +331,24 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (action_stes) {
/* Allocate action STEs for rules that need more than match STE */
- ret = hws_rule_alloc_action_ste(rule);
+		ret = hws_rule_alloc_action_ste(rule, attr->queue_id,
+						!!ste_attr.rtc_0,
+						!!ste_attr.rtc_1);
if (ret) {
mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
mlx5hws_send_abort_new_dep_wqe(queue);
return ret;
}
+ apply.jump_to_action_stc =
+ rule->action_ste.action_tbl->stc.offset;
/* Skip RX/TX based on the dep_wqe init */
- ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0_id : 0;
- ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1_id : 0;
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ?
+ rule->action_ste.action_tbl->rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ?
+ rule->action_ste.action_tbl->rtc_1_id : 0;
/* Action STEs are written to a specific index last to first */
- ste_attr.direct_index = rule->action_ste.index + action_stes;
+ ste_attr.direct_index =
+ rule->action_ste.ste.offset + action_stes;
apply.next_direct_idx = ste_attr.direct_index;
} else {
apply.next_direct_idx = 0;
@@ -670,6 +655,124 @@ int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
return 0;
}
+static u8 hws_rule_ethertype_to_matcher_ipv(u32 ethertype)
+{
+ switch (ethertype) {
+ case ETH_P_IP:
+ return MLX5HWS_MATCHER_IPV_4;
+ case ETH_P_IPV6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static u8 hws_rule_ip_version_to_matcher_ipv(u32 ip_version)
+{
+ switch (ip_version) {
+ case 4:
+ return MLX5HWS_MATCHER_IPV_4;
+ case 6:
+ return MLX5HWS_MATCHER_IPV_6;
+ default:
+ return MLX5HWS_MATCHER_IPV_UNSET;
+ }
+}
+
+static int hws_rule_check_outer_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 outer_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 outer_ipv, ver;
+
+ if (matcher->matches_outer_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ethertype);
+ outer_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_outer_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ outer_headers.ip_version);
+ outer_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv_ether != outer_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent outer ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ outer_ipv = outer_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ outer_ipv_ether : outer_ipv_ip;
+ if (outer_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->outer_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ outer_ipv != matcher->outer_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on outer IP version\n");
+ return -EINVAL;
+ }
+ matcher->outer_ip_version = outer_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_inner_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u8 inner_ipv_ether = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv_ip = MLX5HWS_MATCHER_IPV_UNSET;
+ u8 inner_ipv, ver;
+
+ if (matcher->matches_inner_ethertype) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ethertype);
+ inner_ipv_ether = hws_rule_ethertype_to_matcher_ipv(ver);
+ }
+ if (matcher->matches_inner_ip_version) {
+ ver = MLX5_GET(fte_match_param, match_param,
+ inner_headers.ip_version);
+ inner_ipv_ip = hws_rule_ip_version_to_matcher_ipv(ver);
+ }
+
+ if (inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ip != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv_ether != inner_ipv_ip) {
+ mlx5hws_err(ctx, "Rule matches on inconsistent inner ethertype and ip version\n");
+ return -EINVAL;
+ }
+
+ inner_ipv = inner_ipv_ether != MLX5HWS_MATCHER_IPV_UNSET ?
+ inner_ipv_ether : inner_ipv_ip;
+ if (inner_ipv != MLX5HWS_MATCHER_IPV_UNSET &&
+ matcher->inner_ip_version != MLX5HWS_MATCHER_IPV_UNSET &&
+ inner_ipv != matcher->inner_ip_version) {
+ mlx5hws_err(ctx, "Matcher and rule disagree on inner IP version\n");
+ return -EINVAL;
+ }
+ matcher->inner_ip_version = inner_ipv;
+
+ return 0;
+}
+
+static int hws_rule_check_ip_version(struct mlx5hws_matcher *matcher,
+ u32 *match_param)
+{
+ int ret;
+
+ ret = hws_rule_check_outer_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_check_inner_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
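
/* Illustrative sketch (not part of the patch): the "pin on first use"
 * tri-state pattern the two checks above implement. The matcher's value
 * starts UNSET, the first rule that specifies an IP version pins it, and
 * every later rule must agree. Standalone C; all names are examples.
 */
#include <stdio.h>

enum { EXAMPLE_IPV_UNSET = 0, EXAMPLE_IPV_4 = 1, EXAMPLE_IPV_6 = 2 };

static int example_pin_ip_version(unsigned char *pinned, unsigned char ipv)
{
	if (ipv == EXAMPLE_IPV_UNSET)
		return 0;		/* this rule does not care */

	if (*pinned == EXAMPLE_IPV_UNSET) {
		*pinned = ipv;		/* first rule pins the matcher */
		return 0;
	}

	return *pinned == ipv ? 0 : -1;	/* later rules must agree */
}

int main(void)
{
	unsigned char matcher_ipv = EXAMPLE_IPV_UNSET;

	printf("%d\n", example_pin_ip_version(&matcher_ipv, EXAMPLE_IPV_4)); /* 0 */
	printf("%d\n", example_pin_ip_version(&matcher_ipv, EXAMPLE_IPV_6)); /* -1 */
	return 0;
}
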
int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
u8 mt_idx,
u32 *match_param,
@@ -680,6 +783,10 @@ int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
{
int ret;
+ ret = hws_rule_check_ip_version(matcher, match_param);
+ if (unlikely(ret))
+ return ret;
+
rule_handle->matcher = matcher;
ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index b5ee94ac449b..1c47a9c11572 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -43,12 +43,6 @@ struct mlx5hws_rule_match_tag {
};
};
-struct mlx5hws_rule_action_ste_info {
- struct mlx5hws_pool *pool;
- int index; /* STE array index */
- u8 num_stes;
-};
-
struct mlx5hws_rule_resize_info {
u32 rtc_0;
u32 rtc_1;
@@ -64,8 +58,8 @@ struct mlx5hws_rule {
struct mlx5hws_rule_match_tag tag;
struct mlx5hws_rule_resize_info *resize_info;
};
- struct mlx5hws_rule_action_ste_info action_ste;
- struct mlx5hws_rule_action_ste_info old_action_ste;
+ struct mlx5hws_action_ste_chunk action_ste;
+ struct mlx5hws_action_ste_chunk old_action_ste;
u32 rtc_0; /* The RTC into which the STE was inserted */
u32 rtc_1; /* The RTC into which the STE was inserted */
u8 status; /* enum mlx5hws_rule_status */
@@ -75,7 +69,7 @@ struct mlx5hws_rule {
*/
};
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste);
+void mlx5hws_rule_free_action_ste(struct mlx5hws_action_ste_chunk *action_ste);
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
void *queue, void *user_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index cb6abc4ab7df..c4b22be19a9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -344,18 +344,133 @@ hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
}
}
+static void hws_send_engine_dump_error_cqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5_cqe64 *cqe)
+{
+ u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0;
+ struct mlx5hws_context *ctx = priv->rule->matcher->tbl->ctx;
+ u32 opcode = cqe ? get_cqe_opcode(cqe) : 0;
+ struct mlx5hws_rule *rule = priv->rule;
+
+ /* If something bad happens and lots of rules are failing, we don't
+ * want to pollute dmesg. Print only the first bad cqe per engine,
+ * the one that started the avalanche.
+ */
+ if (queue->error_cqe_printed)
+ return;
+
+ queue->error_cqe_printed = true;
+
+ if (mlx5hws_rule_move_in_progress(rule))
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion moving rule: phase %s, wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_WRITING ? "WRITING" :
+ rule->resize_info->state ==
+ MLX5HWS_RULE_RESIZE_STATE_DELETING ? "DELETING" :
+ "UNKNOWN",
+ rule->pending_wqes);
+ else
+ mlx5hws_err(ctx,
+ "--- rule 0x%08llx: error completion %s (%d), wqes left %d\n",
+ HWS_PTR_TO_ID(rule),
+ rule->status ==
+ MLX5HWS_RULE_STATUS_CREATING ? "CREATING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_DELETING ? "DELETING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_FAILING ? "FAILING" :
+ rule->status ==
+ MLX5HWS_RULE_STATUS_UPDATING ? "UPDATING" : "NA",
+ rule->status,
+ rule->pending_wqes);
+
+ mlx5hws_err(ctx, " rule 0x%08llx: matcher 0x%llx %s\n",
+ HWS_PTR_TO_ID(rule),
+ HWS_PTR_TO_ID(rule->matcher),
+ (rule->matcher->flags & MLX5HWS_MATCHER_FLAGS_ISOLATED) ?
+ "(isolated)" : "");
+
+ if (!cqe) {
+ mlx5hws_err(ctx, " rule 0x%08llx: no CQE\n",
+ HWS_PTR_TO_ID(rule));
+ return;
+ }
+
+ mlx5hws_err(ctx, " rule 0x%08llx: cqe->opcode = %d %s\n",
+ HWS_PTR_TO_ID(rule), opcode,
+ opcode == MLX5_CQE_REQ ? "(MLX5_CQE_REQ)" :
+ opcode == MLX5_CQE_REQ_ERR ? "(MLX5_CQE_REQ_ERR)" : " ");
+
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_error_syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[16]);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- hw_syndrome_type = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->rsvd1[17] >> 4);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- vendor_err_synd = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->vendor_err_synd);
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |--- syndrome = 0x%x\n",
+ HWS_PTR_TO_ID(rule),
+ err_cqe->syndrome);
+ }
+
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->byte_cnt = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->byte_cnt));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-- UPDATE STATUS = %s\n",
+ HWS_PTR_TO_ID(rule),
+ (be32_to_cpu(cqe->byte_cnt) & 0x80000000) ?
+ "FAILURE" : "SUCCESS");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------- SYNDROME = %s\n",
+ HWS_PTR_TO_ID(rule),
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 1) ?
+ "SET_FLOW_FAIL" :
+ ((be32_to_cpu(cqe->byte_cnt) & 0x00000003) == 2) ?
+ "DISABLE_FLOW_FAIL" : "UNKNOWN");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: cqe->sop_drop_qpn = 0x%08x\n",
+ HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->sop_drop_qpn));
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |-send wqe opcode = 0x%02x %s\n",
+ HWS_PTR_TO_ID(rule), wqe_opcode,
+ wqe_opcode == MLX5HWS_WQE_OPCODE_TBL_ACCESS ?
+ "(MLX5HWS_WQE_OPCODE_TBL_ACCESS)" : "(UNKNOWN)");
+ mlx5hws_err(ctx,
+ " rule 0x%08llx: |------------ qpn = 0x%06x\n",
+ HWS_PTR_TO_ID(rule),
+ be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff);
+}
+
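
/* Illustrative sketch (not part of the patch): the byte_cnt decoding the
 * dump above performs -- bit 31 carries the update status and the low two
 * bits carry the syndrome. The masks mirror the ones in the dump; the
 * helper name is an example.
 */
#include <stdint.h>
#include <stdio.h>

static void example_decode_byte_cnt(uint32_t byte_cnt)
{
	const char *synd;

	printf("status: %s\n",
	       (byte_cnt & 0x80000000u) ? "FAILURE" : "SUCCESS");

	switch (byte_cnt & 0x3u) {
	case 1:
		synd = "SET_FLOW_FAIL";
		break;
	case 2:
		synd = "DISABLE_FLOW_FAIL";
		break;
	default:
		synd = "UNKNOWN";
		break;
	}
	printf("syndrome: %s\n", synd);
}
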
static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
struct mlx5hws_send_ring_priv *priv,
u16 wqe_cnt,
- enum mlx5hws_flow_op_status *status)
+ enum mlx5hws_flow_op_status *status,
+ struct mlx5_cqe64 *cqe)
{
priv->rule->pending_wqes--;
- if (*status == MLX5HWS_FLOW_OP_ERROR) {
+ if (unlikely(*status == MLX5HWS_FLOW_OP_ERROR)) {
if (priv->retry_id) {
+			/* If there is a retry_id, this is not an error yet:
+			 * retry inserting the rule into the collision RTC.
+			 */
hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
return;
}
+ hws_send_engine_dump_error_cqe(queue, priv, cqe);
/* Some part of the rule failed */
priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
*priv->used_id = 0;
@@ -420,7 +535,8 @@ static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
if (priv->user_data) {
if (priv->rule) {
- hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+ hws_send_engine_update_rule(queue, priv, wqe_cnt,
+ &status, cqe);
/* Completion is provided on the last rule WQE */
if (priv->rule->pending_wqes)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index f833092235c1..3fb8e99309b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -140,6 +140,7 @@ struct mlx5hws_send_engine {
u16 used_entries;
u16 num_entries;
bool err;
+ bool error_cqe_printed;
struct mutex lock; /* Protects the send engine */
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index ab1297531232..568f691733f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -342,10 +342,10 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
}
-static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
- u32 ft_id,
- u32 fw_ft_type,
- u32 next_ft_id)
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
{
struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
@@ -389,10 +389,10 @@ int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
if (dst_tbl) {
if (list_empty(&dst_tbl->matchers_list)) {
/* Connect src_tbl last_ft to dst_tbl start anchor */
- ret = hws_table_ft_set_next_ft(src_tbl->ctx,
- last_ft_id,
- src_tbl->fw_ft_type,
- dst_tbl->ft_id);
+ ret = mlx5hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
index dd50420eec9e..0400cce0c317 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
@@ -65,4 +65,9 @@ int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
u32 rtc_0_id,
u32 rtc_1_id);
+int mlx5hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id);
+
#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 8007d3f523c9..f367997ab61e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -833,15 +833,21 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
return steering_caps;
}
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
+ struct mlx5dr_action *dr_action;
+
switch (pkt_reformat->reformat_type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
case MLX5_REFORMAT_TYPE_INSERT_HDR:
- return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action);
+ dr_action = pkt_reformat->fs_dr_action.dr_action;
+ *reformat_id = mlx5dr_action_get_pkt_reformat_id(dr_action);
+ return 0;
}
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
index 99a3b2eff6b8..f869f2daefbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
@@ -38,7 +38,9 @@ struct mlx5_fs_dr_table {
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
+int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
@@ -49,9 +51,11 @@ static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
return NULL;
}
-static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+static inline int
+mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat,
+ u32 *reformat_id)
{
- return 0;
+ return -EOPNOTSUPP;
}
static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d10d4c396040..da5c24fc7b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -465,19 +465,22 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
@@ -519,19 +522,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);
-
+out:
kvfree(out);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index e746cd9c68ed..eac9a14a6058 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -205,11 +205,11 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_params mlxsw_thermal_params = {
+static const struct thermal_zone_params mlxsw_thermal_params = {
.no_hwmon = true,
};
-static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_ops = {
.should_bind = mlxsw_thermal_should_bind,
.get_temp = mlxsw_thermal_get_temp,
};
@@ -252,7 +252,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_module_temp_get,
};
@@ -280,7 +280,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+static const struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.should_bind = mlxsw_thermal_module_should_bind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3080ea032e7f..618957d65663 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1159,63 +1159,31 @@ static int mlxsw_sp_set_features(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
- int err;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
+ config, extack);
}
-static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct ifreq *ifr)
+static int mlxsw_sp_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config config;
- int err;
-
- err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
- &config);
- if (err)
- return err;
-
- if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
- return -EFAULT;
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- return 0;
+ return mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
+ config);
}
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct hwtstamp_config config = {0};
-
- mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
-}
-
-static int
-mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct kernel_hwtstamp_config config = {};
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
- case SIOCGHWTSTAMP:
- return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
- default:
- return -EOPNOTSUPP;
- }
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config,
+ NULL);
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1232,7 +1200,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_eth_ioctl = mlxsw_sp_port_ioctl,
+ .ndo_hwtstamp_get = mlxsw_sp_port_hwtstamp_get,
+ .ndo_hwtstamp_set = mlxsw_sp_port_hwtstamp_set,
};
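Moving from .ndo_eth_ioctl to the dedicated hwtstamp callbacks lets the core
handle the SIOC[GS]HWTSTAMP demux and the user-space copies. A simplified
sketch of what the core now does on SIOCSHWTSTAMP (condensed assumption, not
the verbatim net/core implementation):

	struct kernel_hwtstamp_config kernel_cfg;
	struct hwtstamp_config cfg;
	int err;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;
	hwtstamp_config_to_kernel(&kernel_cfg, &cfg);
	err = dev->netdev_ops->ndo_hwtstamp_set(dev, &kernel_cfg, extack);
	if (err)
		return err;
	hwtstamp_config_from_kernel(&cfg, &kernel_cfg);
	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;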
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 37cd1d002b3b..b03ff9e044f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -233,9 +233,10 @@ struct mlxsw_sp_ptp_ops {
u16 local_port);
int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
@@ -351,7 +352,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_flow_block *eg_flow_block;
struct {
struct delayed_work shaper_dw;
- struct hwtstamp_config hwtstamp_config;
+ struct kernel_hwtstamp_config hwtstamp_config;
u16 ing_types;
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 3f64cdbabfa3..0a8fb9c842d3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -262,7 +262,7 @@ err_port_pause_configure:
}
struct mlxsw_sp_port_hw_stats {
- char str[ETH_GSTRING_LEN];
+ char str[ETH_GSTRING_LEN] __nonstring;
u64 (*getter)(const char *payload);
bool cells_bytes;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index ca8b9d18fbb9..e8182dd76c7d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -46,7 +46,7 @@ struct mlxsw_sp2_ptp_state {
refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
* enabled.
*/
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
struct mutex lock; /* Protects 'config' and HW configuration. */
};
@@ -1083,14 +1083,14 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
}
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
*config = mlxsw_sp_port->ptp.hwtstamp_config;
return 0;
}
static int
-mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp1_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1246,7 +1246,8 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
}
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
enum hwtstamp_rx_filters rx_filter;
u16 ing_types;
@@ -1270,7 +1271,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
return 0;
@@ -1451,7 +1452,7 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
@@ -1465,7 +1466,7 @@ int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+mlxsw_sp2_ptp_get_message_types(const struct kernel_hwtstamp_config *config,
u16 *p_ing_types, u16 *p_egr_types,
enum hwtstamp_rx_filters *p_rx_filter)
{
@@ -1542,7 +1543,7 @@ static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1556,7 +1557,7 @@ static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
}
static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
int err;
@@ -1571,7 +1572,7 @@ static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
u16 ing_types, u16 egr_types,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1592,7 +1593,7 @@ static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config new_config)
+ struct kernel_hwtstamp_config new_config)
{
struct mlxsw_sp2_ptp_state *ptp_state;
int err;
@@ -1614,11 +1615,12 @@ err_ptp_disable:
}
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
+ struct kernel_hwtstamp_config new_config;
struct mlxsw_sp2_ptp_state *ptp_state;
enum hwtstamp_rx_filters rx_filter;
- struct hwtstamp_config new_config;
u16 new_ing_types, new_egr_types;
bool ptp_enabled;
int err;
@@ -1652,7 +1654,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->ptp.ing_types = new_ing_types;
mlxsw_sp_port->ptp.egr_types = new_egr_types;
- /* Notify the ioctl caller what we are actually timestamping. */
+ /* Notify the caller what we are actually timestamping. */
config->rx_filter = rx_filter;
mutex_unlock(&ptp_state->lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 102db9060135..df37f1470830 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -34,10 +34,11 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
u64 timestamp);
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
@@ -65,10 +66,11 @@ void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb, u16 local_port);
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config);
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct kernel_ethtool_ts_info *info);
@@ -117,14 +119,15 @@ mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
static inline int
mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -181,14 +184,15 @@ static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
static inline int
mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
static inline int
mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct hwtstamp_config *config)
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 464821dd492d..a2033837182e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
.rif = rif,
};
+ if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
+ return 0;
+
neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
if (rms.err)
goto err_arp;
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index 831921b9d4d5..3ba527514f1e 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -27,6 +27,7 @@ config FBNIC
select NET_DEVLINK
select PAGE_POOL
select PHYLINK
+ select PLDMFW
help
This driver supports Meta Platforms Host Network Interface.
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index de6b1a340f55..65815d4f379e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -19,6 +19,7 @@
struct fbnic_napi_vector;
#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MBX_CMPL_SLOTS 4
struct fbnic_dev {
struct device *dev;
@@ -42,7 +43,7 @@ struct fbnic_dev {
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
- struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_fw_completion *cmpl_data[FBNIC_MBX_CMPL_SLOTS];
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -81,6 +82,9 @@ struct fbnic_dev {
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
+
+ /* Lock protecting access to hw_stats */
+ spinlock_t hw_stats_lock;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 51bee8072420..36393a17d92d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -397,6 +397,15 @@ enum {
#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+#define FBNIC_TCE_TTI_CM_DROP_PKTS 0x0403e /* 0x100f8 */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_L 0x0403f /* 0x100fc */
+#define FBNIC_TCE_TTI_CM_DROP_BYTE_H 0x04040 /* 0x10100 */
+#define FBNIC_TCE_TTI_FRAME_DROP_PKTS 0x04041 /* 0x10104 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_L 0x04042 /* 0x10108 */
+#define FBNIC_TCE_TTI_FRAME_DROP_BYTE_H 0x04043 /* 0x1010c */
+#define FBNIC_TCE_TBI_DROP_PKTS 0x04044 /* 0x10110 */
+#define FBNIC_TCE_TBI_DROP_BYTE_L 0x04045 /* 0x10114 */
+
#define FBNIC_TCE_TCAM_IDX2DEST_MAP 0x0404A /* 0x10128 */
#define FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 CSR_GENMASK(3, 0)
enum {
@@ -432,6 +441,11 @@ enum {
#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
+#define FBNIC_TMI_DROP_PKTS 0x04402 /* 0x11008 */
+#define FBNIC_TMI_DROP_BYTE_L 0x04403 /* 0x1100c */
+#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */
+#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */
+#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */
#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
/* Precision Time Protocol Registers */
@@ -485,6 +499,14 @@ enum {
FBNIC_RXB_FIFO_INDICES = 8
};
+enum {
+ FBNIC_RXB_INTF_NET = 0,
+ FBNIC_RXB_INTF_RBT = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_INTF_INDICES = 4
+};
+
#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */
#define FBNIC_RXB_CT_SIZE_CNT 8
#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0)
@@ -866,6 +888,12 @@ enum {
#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */
#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */
+/* Tx Work Queue Statistics Registers */
+#define FBNIC_QUEUE_TWQ0_PKT_CNT 0x062 /* 0x188 */
+#define FBNIC_QUEUE_TWQ0_ERR_CNT 0x063 /* 0x18c */
+#define FBNIC_QUEUE_TWQ1_PKT_CNT 0x072 /* 0x1c8 */
+#define FBNIC_QUEUE_TWQ1_ERR_CNT 0x073 /* 0x1cc */
+
/* Tx Completion Queue Registers */
#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */
#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0)
@@ -955,6 +983,12 @@ enum {
FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2,
};
+/* Rx Per CQ Statistics Counters */
+#define FBNIC_QUEUE_RDE_PKT_CNT 0x2a2 /* 0xa88 */
+#define FBNIC_QUEUE_RDE_PKT_ERR_CNT 0x2a3 /* 0xa8c */
+#define FBNIC_QUEUE_RDE_CQ_DROP_CNT 0x2a4 /* 0xa90 */
+#define FBNIC_QUEUE_RDE_BDQ_DROP_CNT 0x2a5 /* 0xa94 */
+
/* Rx Interrupt Manager Registers */
#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */
#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index 0072d612215e..4c4938eedd7b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -3,10 +3,12 @@
#include <linux/unaligned.h>
#include <linux/pci.h>
+#include <linux/pldmfw.h>
#include <linux/types.h>
#include <net/devlink.h>
#include "fbnic.h"
+#include "fbnic_tlv.h"
#define FBNIC_SN_STR_LEN 24
@@ -109,8 +111,262 @@ static int fbnic_devlink_info_get(struct devlink *devlink,
return 0;
}
+static bool
+fbnic_pldm_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pldmfw_desc_tlv *desc;
+ u32 anti_rollback_ver = 0;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+
+ /* First, use the standard PCI matching function */
+ if (!pldmfw_op_pci_match_record(context, record))
+ return false;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ /* If PCI match is successful, check for vendor-specific descriptors */
+ list_for_each_entry(desc, &record->descs, entry) {
+ if (desc->type != PLDM_DESC_ID_VENDOR_DEFINED)
+ continue;
+
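+	/* Vendor-defined descriptor layout assumed here (per the PLDM
+	 * firmware update spec): byte 0 is the title string type
+	 * (1 = ASCII), byte 1 the title string length; "AntiRollbackVer"
+	 * is 15 bytes, so a matching descriptor needs at least
+	 * 2 + 15 + 4 bytes including the LE32 version value.
+	 */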
+ if (desc->size < 21 || desc->data[0] != 1 ||
+ desc->data[1] != 15)
+ continue;
+
+ if (memcmp(desc->data + 2, "AntiRollbackVer", 15) != 0)
+ continue;
+
+ anti_rollback_ver = get_unaligned_le32(desc->data + 17);
+ break;
+ }
+
+ /* Compare versions and return error if they do not match */
+ if (anti_rollback_ver < fbd->fw_cap.anti_rollback_version) {
+ char buf[128];
+
+ snprintf(buf, sizeof(buf),
+ "New firmware anti-rollback version (0x%x) is older than device version (0x%x)!",
+ anti_rollback_ver, fbd->fw_cap.anti_rollback_version);
+ devlink_flash_update_status_notify(devlink, buf,
+ "Anti-Rollback", 0, 0);
+
+ return false;
+ }
+
+ return true;
+}
+
+static int
+fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
+{
+ struct fbnic_fw_completion *cmpl;
+ int err;
+
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_fw_start_upgrade(fbd, cmpl,
+ component->identifier,
+ component->component_size);
+ if (err)
+ goto cmpl_free;
+
+ /* Wait for firmware to ack firmware upgrade start */
+ if (wait_for_completion_timeout(&cmpl->done, 10 * HZ))
+ err = cmpl->result;
+ else
+ err = -ETIMEDOUT;
+
+ fbnic_fw_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static int
+fbnic_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ const u8 *data = component->component_data;
+ const u32 size = component->component_size;
+ struct fbnic_fw_completion *cmpl;
+ const char *component_name;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+ struct pci_dev *pdev;
+ u32 offset = 0;
+ u32 length = 0;
+ char buf[32];
+ int err;
+
+ pdev = to_pci_dev(context->dev);
+ fbd = pci_get_drvdata(pdev);
+ devlink = priv_to_devlink(fbd);
+
+ switch (component->identifier) {
+ case QSPI_SECTION_CMRT:
+ component_name = "boot1";
+ break;
+ case QSPI_SECTION_CONTROL_FW:
+ component_name = "boot2";
+ break;
+ case QSPI_SECTION_OPTION_ROM:
+ component_name = "option-rom";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "Unknown component ID %u!",
+ component->identifier);
+ devlink_flash_update_status_notify(devlink, buf, NULL, 0,
+ size);
+ return -EINVAL;
+ }
+
+ /* Once firmware receives the request to start upgrading it responds
+ * with two messages:
+	 * 1. An ACK that it received the message and a possible error code
+ * indicating that an upgrade is not currently possible.
+ * 2. A request for the first chunk of data
+ *
+ * Setup completions for write before issuing the start message so
+ * the driver can catch both messages.
+ */
+ cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ);
+ if (!cmpl)
+ return -ENOMEM;
+
+ err = fbnic_mbx_set_cmpl(fbd, cmpl);
+ if (err)
+ goto cmpl_free;
+
+ devlink_flash_update_timeout_notify(devlink, "Initializing",
+ component_name, 15);
+ err = fbnic_flash_start(fbd, component);
+ if (err)
+ goto err_no_msg;
+
+ while (offset < size) {
+ if (!wait_for_completion_timeout(&cmpl->done, 15 * HZ)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ err = cmpl->result;
+ if (err)
+ break;
+
+		/* Verify firmware is requesting the next chunk in the sequence. */
+ if (cmpl->u.fw_update.offset != offset + length) {
+ err = -EFAULT;
+ break;
+ }
+
+ offset = cmpl->u.fw_update.offset;
+ length = cmpl->u.fw_update.length;
+
+ if (length > TLV_MAX_DATA || offset + length > size) {
+ err = -EFAULT;
+ break;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ component_name,
+ offset, size);
+
+ /* Mailbox will set length to 0 once it receives the finish
+ * message.
+ */
+ if (!length)
+ continue;
+
+ reinit_completion(&cmpl->done);
+ err = fbnic_fw_xmit_fw_write_chunk(fbd, data, offset, length,
+ 0);
+ if (err)
+ break;
+ }
+
+ if (err) {
+ fbnic_fw_xmit_fw_write_chunk(fbd, NULL, 0, 0, err);
+err_no_msg:
+ snprintf(buf, sizeof(buf), "Mailbox encountered error %d!",
+ err);
+ devlink_flash_update_status_notify(devlink, buf,
+ component_name, 0, 0);
+ }
+
+ fbnic_fw_clear_cmpl(fbd, cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(cmpl);
+
+ return err;
+}
+
+static const struct pldmfw_ops fbnic_pldmfw_ops = {
+ .match_record = fbnic_pldm_match_record,
+ .flash_component = fbnic_flash_component,
+};
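+/* Condensed flash handshake implemented above (sketch; the TLV IDs are
+ * the FBNIC_TLV_MSG_ID_FW_* values from fbnic_fw.h):
+ *   host -> FW : FW_START_UPGRADE_REQ  (section id, image length)
+ *   FW -> host : FW_START_UPGRADE_RESP (ack or error)
+ *   FW -> host : FW_WRITE_CHUNK_REQ    (offset, length)       \ repeats
+ *   host -> FW : FW_WRITE_CHUNK_RESP   (offset, length, data) / per chunk
+ *   FW -> host : FW_FINISH_UPGRADE_REQ (final status; length becomes 0)
+ */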
+
+static int
+fbnic_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ const struct firmware *fw = params->fw;
+ struct device *dev = fbd->dev;
+ struct pldmfw context;
+ char *err_msg;
+ int err;
+
+ context.ops = &fbnic_pldmfw_ops;
+ context.dev = dev;
+
+ err = pldmfw_flash_image(&context, fw);
+ if (err) {
+ switch (err) {
+ case -EINVAL:
+ err_msg = "Invalid image";
+ break;
+ case -EOPNOTSUPP:
+ err_msg = "Unsupported image";
+ break;
+ case -ENOMEM:
+ err_msg = "Out of memory";
+ break;
+ case -EFAULT:
+ err_msg = "Invalid header";
+ break;
+ case -ENOENT:
+ err_msg = "No matching record";
+ break;
+ case -ENODEV:
+ err_msg = "No matching device";
+ break;
+ case -ETIMEDOUT:
+ err_msg = "Timed out waiting for reply";
+ break;
+ default:
+ err_msg = "Unknown error";
+ break;
+ }
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Failed to flash PLDM Image: %s (error: %d)",
+ err_msg, err);
+ }
+
+ return err;
+}
+
static const struct devlink_ops fbnic_devlink_ops = {
- .info_get = fbnic_devlink_info_get,
+ .info_get = fbnic_devlink_info_get,
+ .flash_update = fbnic_devlink_flash_update,
};
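With .flash_update wired up, user space can flash a PLDM image through the
standard devlink interface (device address and file name are illustrative):

	# devlink dev flash pci/0000:01:00.0 file fbnic_fw.pldm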
void fbnic_devlink_free(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 0a751a2aaf73..5c7556c8c4c5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -27,6 +27,19 @@ struct fbnic_stat {
FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
+ /* TTI */
+ FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
+ FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
+ FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
+ FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
+ FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
+ FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),
+
+ /* TMI */
+ FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
+ FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
+ FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),
+
/* RPC */
FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
@@ -39,7 +52,64 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
};
#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
-#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN
+
+#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
+ FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),
+
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
+ FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
+};
+
+#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
+
+#define FBNIC_RXB_FIFO_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
+ FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
+};
+
+#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
+
+#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
+ FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
+};
+
+#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
+ ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
+
+#define FBNIC_HW_Q_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)
+
+static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
+ FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
+ FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
+};
+
+#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
+#define FBNIC_HW_STATS_LEN \
+ (FBNIC_HW_FIXED_STATS_LEN + \
+ FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
+ FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
+ FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
+ FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
static void
fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -298,31 +368,125 @@ err_free_clone:
return err;
}
-static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_enqueue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_rxb_fifo_stats;
+ for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
{
+ const struct fbnic_stat *stat;
int i;
+ stat = fbnic_gstrings_rxb_dequeue_stats;
+ for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
+static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+ const struct fbnic_stat *stat;
+ int i, idx;
+
switch (sset) {
case ETH_SS_STATS:
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++)
+ for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
+ fbnic_get_rxb_enqueue_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_strings(&data, i);
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
+ fbnic_get_rxb_dequeue_strings(&data, i);
+
+ for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) {
+ stat = fbnic_gstrings_hw_q_stats;
+
+ for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
+ ethtool_sprintf(&data, stat->string, idx);
+ }
break;
}
}
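+/* Note: the ethtool ABI requires fbnic_get_strings() and
+ * fbnic_get_ethtool_stats() to walk the groups in the same order:
+ * fixed hw stats, RXB enqueue/fifo/dequeue per index, then the
+ * per-queue RDE stats for each of FBNIC_MAX_QUEUES.
+ */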
+static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
+ const void *base, int len, u64 **data)
+{
+ while (len--) {
+ u8 *curr = (u8 *)base + stat->offset;
+
+ **data = *(u64 *)curr;
+
+ stat++;
+ (*data)++;
+ }
+}
+
static void fbnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct fbnic_net *fbn = netdev_priv(dev);
- const struct fbnic_stat *stat;
+ struct fbnic_dev *fbd = fbn->fbd;
int i;
fbnic_get_hw_stats(fbn->fbd);
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
- stat = &fbnic_gstrings_hw_stats[i];
- data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
+ FBNIC_HW_FIXED_STATS_LEN, &data);
+
+ for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_enqueue_stats *enq;
+
+ enq = &fbd->hw_stats.rxb.enq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
+ enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
+ const struct fbnic_rxb_fifo_stats *fifo;
+
+ fifo = &fbd->hw_stats.rxb.fifo[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
+ fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
+ const struct fbnic_rxb_dequeue_stats *deq;
+
+ deq = &fbd->hw_stats.rxb.deq[i];
+ fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
+ deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
+ &data);
+ }
+
+ for (i = 0; i < FBNIC_MAX_QUEUES; i++) {
+ const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];
+
+ fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
+ FBNIC_HW_Q_STATS_LEN, &data);
}
+ spin_unlock(&fbd->hw_stats_lock);
}
static int fbnic_get_sset_count(struct net_device *dev, int sset)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 3d9636a6c968..e2368075ab8c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -237,6 +237,44 @@ static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
return err;
}
+static int fbnic_mbx_set_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ int free = -EXFULL;
+ int i;
+
+ if (!tx_mbx->ready)
+ return -ENODEV;
+
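+	/* Scan every slot: remember a free one, but refuse to queue two
+	 * completions with the same message type at once.
+	 */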
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (!fbd->cmpl_data[i])
+ free = i;
+ else if (fbd->cmpl_data[i]->msg_type == cmpl_data->msg_type)
+ return -EEXIST;
+ }
+
+ if (free == -EXFULL)
+ return -EXFULL;
+
+ fbd->cmpl_data[free] = cmpl_data;
+
+ return 0;
+}
+
+static void fbnic_mbx_clear_cmpl_slot(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] == cmpl_data) {
+ fbd->cmpl_data[i] = NULL;
+ break;
+ }
+ }
+}
+
static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
@@ -258,6 +296,19 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
tx_mbx->head = head;
}
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
struct fbnic_tlv_msg *msg,
struct fbnic_fw_completion *cmpl_data)
@@ -266,23 +317,20 @@ static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
int err;
spin_lock_irqsave(&fbd->fw_tx_lock, flags);
-
- /* If we are already waiting on a completion then abort */
- if (cmpl_data && fbd->cmpl_data) {
- err = -EBUSY;
- goto unlock_mbx;
+ if (cmpl_data) {
+ err = fbnic_mbx_set_cmpl_slot(fbd, cmpl_data);
+ if (err)
+ goto unlock_mbx;
}
- /* Record completion location and submit request */
- if (cmpl_data)
- fbd->cmpl_data = cmpl_data;
-
err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
- /* If msg failed then clear completion data for next caller */
+	/* If we successfully reserved a completion slot but mapping the
+	 * message failed, clear the completion data for the next caller.
+	 */
if (err && cmpl_data)
- fbd->cmpl_data = NULL;
+ fbnic_mbx_clear_cmpl_slot(fbd, cmpl_data);
unlock_mbx:
spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
@@ -304,12 +352,18 @@ fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
{
struct fbnic_fw_completion *cmpl_data = NULL;
unsigned long flags;
+ int i;
spin_lock_irqsave(&fbd->fw_tx_lock, flags);
- if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) {
- cmpl_data = fbd->cmpl_data;
- kref_get(&fbd->cmpl_data->ref_count);
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ if (fbd->cmpl_data[i] &&
+ fbd->cmpl_data[i]->msg_type == msg_type) {
+ cmpl_data = fbd->cmpl_data[i];
+ kref_get(&cmpl_data->ref_count);
+ break;
+ }
}
+
spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
return cmpl_data;
@@ -464,6 +518,7 @@ static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION),
FBNIC_TLV_ATTR_LAST
};
@@ -586,6 +641,9 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
fbd->fw_cap.all_multi = all_multi;
+ fbd->fw_cap.anti_rollback_version =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);
+
return 0;
}
@@ -708,6 +766,188 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ if (!len)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_SECTION, id);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_START_UPGRADE_IMAGE_LENGTH,
+ len);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_start_upgrade_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_START_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_start_upgrade_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_START_UPGRADE_ERROR);
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP);
+ if (!msg)
+ return -ENOMEM;
+
+ /* Report error to FW to cancel upgrade */
+ if (cancel_error) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_ERROR,
+ cancel_error);
+ if (err)
+ goto free_message;
+ }
+
+ if (data) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_WRITE_CHUNK_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_value(msg, FBNIC_FW_WRITE_CHUNK_DATA,
+ data + offset, length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_write_chunk_req_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_WRITE_CHUNK_LENGTH),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_write_chunk_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ u32 offset;
+ u32 length;
+
+ /* Verify we have a completion pointer */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+	/* Pull the offset/length pair and mark the request as complete */
+ offset = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_WRITE_CHUNK_LENGTH);
+ cmpl_data->u.fw_update.offset = offset;
+ cmpl_data->u.fw_update.length = length;
+
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_finish_upgrade_req_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_FINISH_UPGRADE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+	/* Verify we have a completion pointer. The flash loop waits on the
+	 * WRITE_CHUNK_REQ completion for the whole transfer, so that is
+	 * the message type to look up here, not FINISH_UPGRADE.
+	 */
+ msg_type = FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ /* Check for errors */
+ err = fta_get_sint(results, FBNIC_FW_FINISH_UPGRADE_ERROR);
+
+	/* Close out the update by incrementing offset by length, which then
+	 * matches the total size of the component. Set length to 0 since no
+	 * new chunks will be requested.
+ */
+ cmpl_data->u.fw_update.offset += cmpl_data->u.fw_update.length;
+ cmpl_data->u.fw_update.length = 0;
+
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return 0;
+}
+
/**
* fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
* @fbd: FBNIC device structure
@@ -792,6 +1032,15 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
+ fbnic_fw_start_upgrade_resp_index,
+ fbnic_fw_parse_fw_start_upgrade_resp),
+ FBNIC_TLV_PARSER(FW_WRITE_CHUNK_REQ,
+ fbnic_fw_write_chunk_req_index,
+ fbnic_fw_parse_fw_write_chunk_req),
+ FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
+ fbnic_fw_finish_upgrade_req_index,
+ fbnic_fw_parse_fw_finish_upgrade_req),
FBNIC_TLV_PARSER(TSENE_READ_RESP,
fbnic_tsene_read_resp_index,
fbnic_fw_parse_tsene_read_resp),
@@ -921,10 +1170,16 @@ static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
{
- if (fbd->cmpl_data) {
- __fbnic_fw_evict_cmpl(fbd->cmpl_data);
- fbd->cmpl_data = NULL;
+ int i;
+
+ for (i = 0; i < FBNIC_MBX_CMPL_SLOTS; i++) {
+ struct fbnic_fw_completion *cmpl_data = fbd->cmpl_data[i];
+
+ if (cmpl_data)
+ __fbnic_fw_evict_cmpl(cmpl_data);
}
+
+ memset(fbd->cmpl_data, 0, sizeof(fbd->cmpl_data));
}
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
@@ -977,20 +1232,28 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fw_version, str_sz);
}
-void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl,
- u32 msg_type)
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
{
- fw_cmpl->msg_type = msg_type;
- init_completion(&fw_cmpl->done);
- kref_init(&fw_cmpl->ref_count);
+ struct fbnic_fw_completion *cmpl;
+
+ cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
+ if (!cmpl)
+ return NULL;
+
+ cmpl->msg_type = msg_type;
+ init_completion(&cmpl->done);
+ kref_init(&cmpl->ref_count);
+
+ return cmpl;
}
-void fbnic_fw_clear_compl(struct fbnic_dev *fbd)
+void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *fw_cmpl)
{
unsigned long flags;
spin_lock_irqsave(&fbd->fw_tx_lock, flags);
- fbd->cmpl_data = NULL;
+ fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
}
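The reworked completion API is refcounted and slot-based. A typical caller
lifecycle, sketched from the sensor-read path (the timeout is illustrative):

	struct fbnic_fw_completion *cmpl;
	int err;

	cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!cmpl)
		return -ENOMEM;
	err = fbnic_fw_xmit_tsene_read_msg(fbd, cmpl);	/* reserves a slot */
	if (!err && !wait_for_completion_timeout(&cmpl->done, 2 * HZ))
		err = -ETIMEDOUT;
	fbnic_fw_clear_cmpl(fbd, cmpl);	/* release the slot */
	fbnic_fw_put_cmpl(cmpl);	/* drop this reference */
	return err;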
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index a3618e7826c2..08bc4b918de7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -42,6 +42,7 @@ struct fbnic_fw_cap {
u8 all_multi : 1;
u8 link_speed;
u8 link_fec;
+ u32 anti_rollback_version;
};
struct fbnic_fw_completion {
@@ -51,6 +52,10 @@ struct fbnic_fw_completion {
int result;
union {
struct {
+ u32 offset;
+ u32 length;
+ } fw_update;
+ struct {
s32 millivolts;
s32 millidegrees;
} tsene;
@@ -59,17 +64,25 @@ struct fbnic_fw_completion {
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
+int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ unsigned int id, unsigned int len);
+int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
+ const u8 *data, u32 offset, u16 length,
+ int cancel_error);
int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data);
-void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data,
- u32 msg_type);
-void fbnic_fw_clear_compl(struct fbnic_dev *fbd);
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
+void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
@@ -86,6 +99,15 @@ do { \
#define fbnic_mk_fw_ver_str(_rev_id, _str) \
fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str, sizeof(_str))
+enum {
+ QSPI_SECTION_CMRT = 0,
+ QSPI_SECTION_CONTROL_FW = 1,
+ QSPI_SECTION_UCODE = 2,
+ QSPI_SECTION_OPTION_ROM = 3,
+ QSPI_SECTION_USER = 4,
+ QSPI_SECTION_INVALID,
+};
+
#define FW_HEARTBEAT_PERIOD (10 * HZ)
enum {
@@ -95,6 +117,12 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22,
+ FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24,
+ FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28,
+ FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29,
FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
};
@@ -122,6 +150,7 @@ enum {
FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10,
FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11,
FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12,
+ FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION = 0x15,
FBNIC_FW_CAP_RESP_MSG_MAX
};
@@ -149,4 +178,25 @@ enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
+
+enum {
+ FBNIC_FW_START_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_START_UPGRADE_SECTION = 0x1,
+ FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2,
+ FBNIC_FW_START_UPGRADE_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_WRITE_CHUNK_OFFSET = 0x0,
+ FBNIC_FW_WRITE_CHUNK_LENGTH = 0x1,
+ FBNIC_FW_WRITE_CHUNK_DATA = 0x2,
+ FBNIC_FW_WRITE_CHUNK_ERROR = 0x3,
+ FBNIC_FW_WRITE_CHUNK_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_FINISH_UPGRADE_ERROR = 0x0,
+ FBNIC_FW_FINISH_UPGRADE_MSG_MAX
+};
+
#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
index 89ac6bc8c7fc..4223d8100e64 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -70,6 +70,100 @@ static void fbnic_hw_stat_rd64(struct fbnic_dev *fbd, u32 reg, s32 offset,
stat->u.old_reg_value_64 = new_reg_value;
}
+static void fbnic_reset_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rst32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_DROP_PKTS, &tmi->drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TMI_ILLEGAL_PTP_REQS,
+ &tmi->ptp_illegal_req);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_GOOD_PTP_TS, &tmi->ptp_good_ts);
+ fbnic_hw_stat_rd32(fbd, FBNIC_TMI_BAD_PTP_TS, &tmi->ptp_bad_ts);
+}
+
+static void fbnic_get_tmi_stats(struct fbnic_dev *fbd,
+ struct fbnic_tmi_stats *tmi)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_TMI_DROP_BYTE_L, 1, &tmi->drop.bytes);
+}
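+/* What fbnic_hw_stat_rd64() implies above (assumption based on the CSR
+ * layout): a 64-bit counter is split across two consecutive 32-bit
+ * registers, low word first, and the "1" argument is the stride from
+ * the _L to the _H register:
+ *   lo = rd32(fbd, FBNIC_TMI_DROP_BYTE_L);
+ *   hi = rd32(fbd, FBNIC_TMI_DROP_BYTE_L + 1); // FBNIC_TMI_DROP_BYTE_H
+ *   bytes = ((u64)hi << 32) | lo;
+ */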
+
+static void fbnic_reset_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+ fbnic_hw_stat_rst64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
+static void fbnic_get_tti_stats32(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_CM_DROP_PKTS,
+ &tti->cm_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_PKTS,
+ &tti->frame_drop.frames);
+
+ fbnic_hw_stat_rd32(fbd,
+ FBNIC_TCE_TBI_DROP_PKTS,
+ &tti->tbi_drop.frames);
+}
+
+static void fbnic_get_tti_stats(struct fbnic_dev *fbd,
+ struct fbnic_tti_stats *tti)
+{
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_CM_DROP_BYTE_L,
+ 1,
+ &tti->cm_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TTI_FRAME_DROP_BYTE_L,
+ 1,
+ &tti->frame_drop.bytes);
+
+ fbnic_hw_stat_rd64(fbd,
+ FBNIC_TCE_TBI_DROP_BYTE_L,
+ 1,
+ &tti->tbi_drop.bytes);
+}
+
static void fbnic_reset_rpc_stats(struct fbnic_dev *fbd,
struct fbnic_rpc_stats *rpc)
{
@@ -117,6 +211,221 @@ static void fbnic_get_rpc_stats32(struct fbnic_dev *fbd,
&rpc->ovr_size_err);
}
+static void fbnic_reset_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.u.old_reg_value_32 = 0;
+}
+
+static void fbnic_reset_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_reset_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+
+ fbnic_hw_stat_rst32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+ fbnic_hw_stat_rst64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+}
+
+static void fbnic_reset_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_reset_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_reset_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_reset_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_fifo_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DROP_FRMS_STS(i),
+ &fifo->drop.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRUN_FRMS_STS(i),
+ &fifo->trunc.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_DROP_STS(i),
+ &fifo->trans_drop);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_TRANS_ECN_STS(i),
+ &fifo->trans_ecn);
+
+ fifo->level.value = rd32(fbd, FBNIC_RXB_PBUF_FIFO_LEVEL(i));
+}
+
+static void fbnic_get_rxb_fifo_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_fifo_stats *fifo)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DROP_BYTES_STS_L(i), 1,
+ &fifo->drop.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_TRUN_BYTES_STS_L(i), 1,
+ &fifo->trunc.bytes);
+
+ fbnic_get_rxb_fifo_stats32(fbd, i, fifo);
+}
+
+static void fbnic_get_rxb_enq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_DRBO_FRM_CNT_SRC(i),
+ &enq->drbo.frames);
+
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTEGRITY_ERR(i),
+ &enq->integrity_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_MAC_ERR(i),
+ &enq->mac_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PARSER_ERR(i),
+ &enq->parser_err);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_FRM_ERR(i),
+ &enq->frm_err);
+}
+
+static void fbnic_get_rxb_enq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_enqueue_stats *enq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(i), 4,
+ &enq->drbo.bytes);
+
+ fbnic_get_rxb_enq_stats32(fbd, i, enq);
+}
+
+static void fbnic_get_rxb_deq_stats32(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_INTF_FRM_CNT_DST(i),
+ &deq->intf.frames);
+ fbnic_hw_stat_rd32(fbd, FBNIC_RXB_PBUF_FRM_CNT_DST(i),
+ &deq->pbuf.frames);
+}
+
+static void fbnic_get_rxb_deq_stats(struct fbnic_dev *fbd, int i,
+ struct fbnic_rxb_dequeue_stats *deq)
+{
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_INTF_BYTE_CNT_DST_L(i), 4,
+ &deq->intf.bytes);
+ fbnic_hw_stat_rd64(fbd, FBNIC_RXB_PBUF_BYTE_CNT_DST_L(i), 4,
+ &deq->pbuf.bytes);
+
+ fbnic_get_rxb_deq_stats32(fbd, i, deq);
+}
+
+static void fbnic_get_rxb_stats32(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats32(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats32(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats32(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_get_rxb_stats(struct fbnic_dev *fbd,
+ struct fbnic_rxb_stats *rxb)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
+ fbnic_get_rxb_fifo_stats(fbd, i, &rxb->fifo[i]);
+
+ for (i = 0; i < FBNIC_RXB_INTF_INDICES; i++) {
+ fbnic_get_rxb_enq_stats(fbd, i, &rxb->enq[i]);
+ fbnic_get_rxb_deq_stats(fbd, i, &rxb->deq[i]);
+ }
+}
+
+static void fbnic_reset_hw_rxq_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rst32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ int i;
+
+ for (i = 0; i < fbd->max_num_queues; i++, hw_q++) {
+ u32 base = FBNIC_QUEUE(i);
+
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_PKT_ERR_CNT,
+ &hw_q->rde_pkt_err);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_CQ_DROP_CNT,
+ &hw_q->rde_pkt_cq_drop);
+ fbnic_hw_stat_rd32(fbd,
+ base + FBNIC_QUEUE_RDE_BDQ_DROP_CNT,
+ &hw_q->rde_pkt_bdq_drop);
+ }
+}
+
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q)
+{
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_get_hw_rxq_stats32(fbd, hw_q);
+ spin_unlock(&fbd->hw_stats_lock);
+}
+
static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
struct fbnic_pcie_stats *pcie)
{
@@ -203,18 +512,40 @@ static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
{
+ spin_lock(&fbd->hw_stats_lock);
+ fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti);
fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
+ fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
+ fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats_lock);
}
-void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
+ fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti);
fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
+ fbnic_get_rxb_stats32(fbd, &fbd->hw_stats.rxb);
+ fbnic_get_hw_rxq_stats32(fbd, fbd->hw_stats.hw_q);
+}
+
+void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
+{
+ spin_lock(&fbd->hw_stats_lock);
+ __fbnic_get_hw_stats32(fbd);
+ spin_unlock(&fbd->hw_stats_lock);
}
void fbnic_get_hw_stats(struct fbnic_dev *fbd)
{
- fbnic_get_hw_stats32(fbd);
+ spin_lock(&fbd->hw_stats_lock);
+ __fbnic_get_hw_stats32(fbd);
+ fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
+ fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti);
+ fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
+ spin_unlock(&fbd->hw_stats_lock);
}
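
The shape of the code above is worth calling out: one unlocked helper (__fbnic_get_hw_stats32) reads every rolling 32-bit counter, and the exported entry points wrap it — plus any 64-bit reads — in hw_stats_lock so a concurrent reader never sees a half-updated snapshot. A minimal user-space sketch of the same pattern, with a pthread mutex standing in for the driver's spinlock and all names invented:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the driver's counter state. */
struct stat_counter {
	uint64_t value;		/* accumulated 64-bit total */
	uint32_t last_hw;	/* last raw hardware reading */
};

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stat_counter rx_drops;

/* Fold a free-running 32-bit hardware counter into the 64-bit total.
 * Unsigned subtraction makes wraparound harmless as long as we poll
 * at least once per wrap period.
 */
static void stat_rd32(struct stat_counter *c, uint32_t hw)
{
	c->value += (uint32_t)(hw - c->last_hw);
	c->last_hw = hw;
}

/* Unlocked helper: one read per counter. */
static void __get_stats32(uint32_t hw_sample)
{
	stat_rd32(&rx_drops, hw_sample);
}

/* Entry point: serialize updates so a concurrent reader never sees a
 * half-updated snapshot.
 */
static void get_stats32(uint32_t hw_sample)
{
	pthread_mutex_lock(&stats_lock);
	__get_stats32(hw_sample);
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	get_stats32(0xfffffff0u);	/* first poll, near the wrap point */
	get_stats32(0x00000010u);	/* wrapped: adds only 0x20 */
	printf("total=%llu\n", (unsigned long long)rx_drops.value);
	return 0;
}
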
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 78df56b87745..07e54bb75bf3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -17,6 +17,11 @@ struct fbnic_stat_counter {
bool reported;
};
+struct fbnic_hw_stat {
+ struct fbnic_stat_counter frames;
+ struct fbnic_stat_counter bytes;
+};
+
struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FramesTransmittedOK;
struct fbnic_stat_counter FramesReceivedOK;
@@ -37,12 +42,49 @@ struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
};
+struct fbnic_tmi_stats {
+ struct fbnic_hw_stat drop;
+ struct fbnic_stat_counter ptp_illegal_req, ptp_good_ts, ptp_bad_ts;
+};
+
+struct fbnic_tti_stats {
+ struct fbnic_hw_stat cm_drop, frame_drop, tbi_drop;
+};
+
struct fbnic_rpc_stats {
struct fbnic_stat_counter unkn_etype, unkn_ext_hdr;
struct fbnic_stat_counter ipv4_frag, ipv6_frag, ipv4_esp, ipv6_esp;
struct fbnic_stat_counter tcp_opt_err, out_of_hdr_err, ovr_size_err;
};
+struct fbnic_rxb_enqueue_stats {
+ struct fbnic_hw_stat drbo;
+ struct fbnic_stat_counter integrity_err, mac_err;
+ struct fbnic_stat_counter parser_err, frm_err;
+};
+
+struct fbnic_rxb_fifo_stats {
+ struct fbnic_hw_stat drop, trunc;
+ struct fbnic_stat_counter trans_drop, trans_ecn;
+ struct fbnic_stat_counter level;
+};
+
+struct fbnic_rxb_dequeue_stats {
+ struct fbnic_hw_stat intf, pbuf;
+};
+
+struct fbnic_rxb_stats {
+ struct fbnic_rxb_enqueue_stats enq[FBNIC_RXB_ENQUEUE_INDICES];
+ struct fbnic_rxb_fifo_stats fifo[FBNIC_RXB_FIFO_INDICES];
+ struct fbnic_rxb_dequeue_stats deq[FBNIC_RXB_DEQUEUE_INDICES];
+};
+
+struct fbnic_hw_q_stats {
+ struct fbnic_stat_counter rde_pkt_err;
+ struct fbnic_stat_counter rde_pkt_cq_drop;
+ struct fbnic_stat_counter rde_pkt_bdq_drop;
+};
+
struct fbnic_pcie_stats {
struct fbnic_stat_counter ob_rd_tlp, ob_rd_dword;
struct fbnic_stat_counter ob_wr_tlp, ob_wr_dword;
@@ -55,13 +97,19 @@ struct fbnic_pcie_stats {
struct fbnic_hw_stats {
struct fbnic_mac_stats mac;
+ struct fbnic_tmi_stats tmi;
+ struct fbnic_tti_stats tti;
struct fbnic_rpc_stats rpc;
+ struct fbnic_rxb_stats rxb;
+ struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];
struct fbnic_pcie_stats pcie;
};
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
void fbnic_reset_hw_stats(struct fbnic_dev *fbd);
+void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
+ struct fbnic_hw_q_stats *hw_q);
void fbnic_get_hw_stats32(struct fbnic_dev *fbd);
void fbnic_get_hw_stats(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index dde4a37116e2..10e108c1fcd0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -687,13 +687,10 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
int err = 0, retries = 5;
s32 *sensor;
- fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL);
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
if (!fw_cmpl)
return -ENOMEM;
- /* Initialize completion and queue it for FW to process */
- fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
-
switch (id) {
case FBNIC_SENSOR_TEMP:
sensor = &fw_cmpl->u.tsene.millidegrees;
@@ -744,7 +741,7 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
*val = *sensor;
exit_cleanup:
- fbnic_fw_clear_compl(fbd);
+ fbnic_fw_clear_cmpl(fbd, fw_cmpl);
exit_free:
fbnic_fw_put_cmpl(fw_cmpl);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 2524d9b88d59..aa812c63d5af 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -404,12 +404,16 @@ static int fbnic_hwtstamp_set(struct net_device *netdev,
static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
+ u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
u64 tx_bytes, tx_packets, tx_dropped = 0;
- u64 rx_bytes, rx_packets, rx_dropped = 0;
struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
+ u64 rx_over = 0, rx_missed = 0;
unsigned int start, i;
+ fbnic_get_hw_stats(fbd);
+
stats = &fbn->tx_stats;
tx_bytes = stats->bytes;
@@ -420,6 +424,12 @@ static void fbnic_get_stats64(struct net_device *dev,
stats64->tx_packets = tx_packets;
stats64->tx_dropped = tx_dropped;
+ /* Record drops from Tx HW Datapath */
+ tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
+ fbd->hw_stats.tti.cm_drop.frames.value +
+ fbd->hw_stats.tti.frame_drop.frames.value +
+ fbd->hw_stats.tti.tbi_drop.frames.value;
+
for (i = 0; i < fbn->num_tx_queues; i++) {
struct fbnic_ring *txr = fbn->tx[i];
@@ -445,9 +455,34 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_packets = stats->packets;
rx_dropped = stats->dropped;
+ spin_lock(&fbd->hw_stats_lock);
+ /* Record drops for the host FIFOs.
+ * FIFO 4: network to host, FIFO 6: BMC to host
+ * Exclude the BMC and MC FIFOs as those stats may contain drops
+ * due to unrelated items such as TCAM misses. They are still
+ * accessible through the ethtool stats.
+ */
+ i = FBNIC_RXB_FIFO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+ i = FBNIC_RXB_FIFO_BMC_TO_HOST;
+ rx_missed += fbd->hw_stats.rxb.fifo[i].drop.frames.value;
+
+ for (i = 0; i < fbd->max_num_queues; i++) {
+ /* Report packets dropped due to the CQ being full or the BDQ being empty */
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_cq_drop.value;
+ rx_over += fbd->hw_stats.hw_q[i].rde_pkt_bdq_drop.value;
+
+ /* Report packets with errors */
+ rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
+ }
+ spin_unlock(&fbd->hw_stats_lock);
+
stats64->rx_bytes = rx_bytes;
stats64->rx_packets = rx_packets;
stats64->rx_dropped = rx_dropped;
+ stats64->rx_over_errors = rx_over;
+ stats64->rx_errors = rx_errors;
+ stats64->rx_missed_errors = rx_missed;
for (i = 0; i < fbn->num_rx_queues; i++) {
struct fbnic_ring *rxr = fbn->rx[i];
@@ -487,6 +522,7 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
{
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
+ struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
u64 bytes, packets, alloc_fail;
u64 csum_complete, csum_none;
@@ -510,6 +546,15 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
rx->alloc_fail = alloc_fail;
rx->csum_complete = csum_complete;
rx->csum_none = csum_none;
+
+ fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
+
+ spin_lock(&fbd->hw_stats_lock);
+ rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
+ fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
+ rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
+ rx->hw_drop_overruns;
+ spin_unlock(&fbd->hw_stats_lock);
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
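
For orientation, the aggregation above sorts hardware drops into three distinct rtnl_link_stats64 buckets: RXB FIFO drops land in rx_missed_errors, CQ/BDQ drops in rx_over_errors, and RDE packet errors in rx_errors. A stand-alone sketch of that bucketing, with made-up counter values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-queue counters mirroring the fields read above. */
struct hw_q { uint64_t pkt_err, cq_drop, bdq_drop; };

int main(void)
{
	struct hw_q q[2] = { { 1, 10, 0 }, { 2, 0, 5 } };
	uint64_t host_fifo_drop = 7, bmc_to_host_drop = 3;
	uint64_t rx_missed = 0, rx_over = 0, rx_errors = 0;

	/* FIFO drops: frames the RX buffer had to throw away. */
	rx_missed += host_fifo_drop + bmc_to_host_drop;

	for (int i = 0; i < 2; i++) {
		/* Queue-level drops: completion/buffer resources ran out. */
		rx_over += q[i].cq_drop + q[i].bdq_drop;
		/* Malformed packets. */
		rx_errors += q[i].pkt_err;
	}

	printf("missed=%llu over=%llu errors=%llu\n",
	       (unsigned long long)rx_missed,
	       (unsigned long long)rx_over,
	       (unsigned long long)rx_errors);
	return 0;
}
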
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 4e8595239c0f..249d3ef862d5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
+#include <net/devlink.h>
#include "fbnic.h"
#include "fbnic_drvinfo.h"
@@ -292,6 +293,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fbnic_devlink_register(fbd);
fbnic_dbg_fbd_init(fbd);
+ spin_lock_init(&fbd->hw_stats_lock);
/* Capture snapshot of hardware stats so netdev can calculate delta */
fbnic_reset_hw_stats(fbd);
@@ -387,8 +389,12 @@ static int fbnic_pm_suspend(struct device *dev)
rtnl_unlock();
null_uc_addr:
+ devl_lock(priv_to_devlink(fbd));
+
fbnic_fw_free_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
+
/* Free the IRQs so they aren't trying to occupy sleeping CPUs */
fbnic_free_irqs(fbd);
@@ -419,11 +425,15 @@ static int __fbnic_pm_resume(struct device *dev)
fbd->mac->init_regs(fbd);
+ devl_lock(priv_to_devlink(fbd));
+
/* Re-enable mailbox */
err = fbnic_fw_request_mbx(fbd);
if (err)
goto err_free_irqs;
+ devl_unlock(priv_to_devlink(fbd));
+
/* No netdev means there isn't a network interface to bring up */
if (fbnic_init_failure(fbd))
return 0;
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 1459acfb1e61..64a3b953cc17 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -18,6 +18,8 @@
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE (512)
#define MAX_OTP_SIZE (1024)
+#define MAX_HS_OTP_SIZE (8 * 1024)
+#define MAX_HS_EEPROM_SIZE (64 * 1024)
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)
@@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;
+ if (offset + length > MAX_HS_OTP_SIZE)
+ return -EINVAL;
+
ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
u32 val;
int i;
+ if (offset + length > MAX_HS_EEPROM_SIZE)
+ return -EINVAL;
+
retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);
if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
- return MAX_OTP_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE;
- return MAX_EEPROM_SIZE;
+ return adapter->is_pci11x1x ? MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE;
}
static int lan743x_ethtool_get_eeprom(struct net_device *netdev,
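
The new offset + length checks cap reads at the 8 KiB OTP and 64 KiB EEPROM windows of the PCI11x1x parts. One caveat worth noting: with 32-bit operands such a sum can wrap around; a wrap-safe formulation looks like the sketch below (invented names, not the driver's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_WINDOW (8u * 1024u)	/* e.g. the HS OTP size */

/* Reject (offset, length) pairs that leave the window, without relying
 * on offset + length staying below UINT32_MAX.
 */
static bool range_ok(uint32_t offset, uint32_t length)
{
	return offset <= MAX_WINDOW && length <= MAX_WINDOW - offset;
}

int main(void)
{
	printf("%d\n", range_ok(0, MAX_WINDOW));	/* 1: whole window */
	printf("%d\n", range_ok(MAX_WINDOW - 4, 8));	/* 0: runs past end */
	printf("%d\n", range_ok(0xFFFFFFF0u, 0x20u));	/* 0: sum would wrap */
	return 0;
}
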
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e2d6bfb5d693..9d70b51ca91d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1330,7 +1330,7 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
}
/* PHY */
-static int lan743x_phy_reset(struct lan743x_adapter *adapter)
+static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter)
{
u32 data;
@@ -1346,11 +1346,6 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static int lan743x_phy_init(struct lan743x_adapter *adapter)
-{
- return lan743x_phy_reset(adapter);
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1729,6 +1724,7 @@ int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
default:
return -ERANGE;
}
+ adapter->rx_tstamp_filter = rx_filter;
return 0;
}
@@ -2499,8 +2495,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head,
- GFP_ATOMIC | GFP_DMA)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head, GFP_ATOMIC)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@ -3352,8 +3347,6 @@ static int lan743x_netdev_ioctl(struct net_device *netdev,
if (!netif_running(netdev))
return -EINVAL;
- if (cmd == SIOCSHWTSTAMP)
- return lan743x_ptp_ioctl(netdev, ifr, cmd);
return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
@@ -3448,6 +3441,8 @@ static const struct net_device_ops lan743x_netdev_ops = {
.ndo_change_mtu = lan743x_netdev_change_mtu,
.ndo_get_stats64 = lan743x_netdev_get_stats64,
.ndo_set_mac_address = lan743x_netdev_set_mac_address,
+ .ndo_hwtstamp_get = lan743x_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = lan743x_ptp_hwtstamp_set,
};
static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
@@ -3495,6 +3490,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
struct lan743x_tx *tx;
+ u32 sgmii_ctl;
int index;
int ret;
@@ -3507,6 +3503,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
pci11x1x_set_rfe_rd_fifo_threshold(adapter);
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ } else {
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ }
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
@@ -3524,10 +3529,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
if (ret)
return ret;
- ret = lan743x_phy_init(adapter);
- if (ret)
- return ret;
-
ret = lan743x_ptp_init(adapter);
if (ret)
return ret;
@@ -3558,7 +3559,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
- u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -3570,10 +3570,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
adapter->mdiobus->priv = (void *)adapter;
if (adapter->is_pci11x1x) {
if (adapter->is_sgmii_en) {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
adapter->mdiobus->read = lan743x_mdiobus_read_c22;
@@ -3584,10 +3580,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus-c45\n");
} else {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"RGMII operation\n");
// Only C22 support when RGMII I/F
@@ -3673,6 +3665,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
if (ret)
goto cleanup_pci;
+ ret = lan743x_hw_reset_phy(adapter);
+ if (ret)
+ goto cleanup_pci;
+
ret = lan743x_hardware_init(adapter, pdev);
if (ret)
goto cleanup_pci;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index db5fc73e41cc..02a28b709163 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -1087,6 +1087,7 @@ struct lan743x_adapter {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
+ int rx_tstamp_filter;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 0be44dcb3393..a3b48388b3fd 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -463,10 +463,6 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
struct lan743x_ptp_perout *perout = &ptp->perout[index];
int ret = 0;
- /* Reject requests with unsupported flags */
- if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
if (on) {
perout_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
perout_request->index);
@@ -942,12 +938,6 @@ static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
extts = &ptp->extts[index];
- if (extts_request->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
if (on) {
extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
if (extts_pin < 0)
@@ -1543,6 +1533,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
ptp->ptp_clock_info.n_per_out = LAN743X_PTP_N_EVENT_CHAN;
ptp->ptp_clock_info.n_pins = n_pins;
ptp->ptp_clock_info.pps = LAN743X_PTP_N_PPS;
+ ptp->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ ptp->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
ptp->ptp_clock_info.pin_config = ptp->pin_config;
ptp->ptp_clock_info.adjfine = lan743x_ptpci_adjfine;
ptp->ptp_clock_info.adjtime = lan743x_ptpci_adjtime;
@@ -1742,23 +1736,32 @@ void lan743x_ptp_tx_timestamp_skb(struct lan743x_adapter *adapter,
lan743x_ptp_tx_ts_complete(adapter);
}
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct hwtstamp_config config;
- int ret = 0;
- int index;
+ struct lan743x_tx *tx = &adapter->tx[0];
- if (!ifr) {
- netif_err(adapter, drv, adapter->netdev,
- "SIOCSHWTSTAMP, ifr == NULL\n");
- return -EINVAL;
- }
+ if (tx->ts_flags & TX_TS_FLAG_ONE_STEP_SYNC)
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ else if (tx->ts_flags & TX_TS_FLAG_TIMESTAMPING_ENABLED)
+ config->tx_type = HWTSTAMP_TX_ON;
+ else
+ config->tx_type = HWTSTAMP_TX_OFF;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ config->rx_filter = adapter->rx_tstamp_filter;
- switch (config.tx_type) {
+ return 0;
+}
+
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int index;
+
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
for (index = 0; index < adapter->used_tx_channels;
index++)
@@ -1782,19 +1785,12 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
lan743x_ptp_set_sync_ts_insert(adapter, true);
break;
case HWTSTAMP_TX_ONESTEP_P2P:
- ret = -ERANGE;
- break;
+ return -ERANGE;
default:
netif_warn(adapter, drv, adapter->netdev,
- " tx_type = %d, UNKNOWN\n", config.tx_type);
- ret = -EINVAL;
- break;
+ " tx_type = %d, UNKNOWN\n", config->tx_type);
+ return -EINVAL;
}
- ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter);
-
- if (!ret)
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
- return ret;
+ return lan743x_rx_set_tstamp_mode(adapter, config->rx_filter);
}
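
All of the ifreq plumbing disappears above because the net core now performs the user copies itself and hands drivers a kernel_hwtstamp_config through the new ndo_hwtstamp_get/set hooks. A reduced user-space sketch of the resulting handler shape, with plain structs standing in for the kernel types and all names invented:

#include <errno.h>
#include <stdio.h>

enum tx_type { TX_OFF, TX_ON, TX_ONESTEP_SYNC };
struct hwtstamp_cfg { enum tx_type tx_type; int rx_filter; };
struct adapter { enum tx_type tx_mode; int rx_filter; };

/* get: report current state; no user copies, no ioctl decoding. */
static int hwtstamp_get(struct adapter *ad, struct hwtstamp_cfg *cfg)
{
	cfg->tx_type = ad->tx_mode;
	cfg->rx_filter = ad->rx_filter;
	return 0;
}

/* set: validate, then commit; on error the caller's view of the
 * configuration is left untouched.
 */
static int hwtstamp_set(struct adapter *ad, const struct hwtstamp_cfg *cfg)
{
	switch (cfg->tx_type) {
	case TX_OFF:
	case TX_ON:
	case TX_ONESTEP_SYNC:
		ad->tx_mode = cfg->tx_type;
		break;
	default:
		return -ERANGE;
	}
	ad->rx_filter = cfg->rx_filter;
	return 0;
}

int main(void)
{
	struct adapter ad = { TX_OFF, 0 };
	struct hwtstamp_cfg cfg = { TX_ON, 1 };

	if (!hwtstamp_set(&ad, &cfg) && !hwtstamp_get(&ad, &cfg))
		printf("tx=%d rx=%d\n", cfg.tx_type, cfg.rx_filter);
	return 0;
}
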
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index 0d29914cd460..e8d073bfa2ca 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -51,8 +51,11 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter);
void lan743x_ptp_close(struct lan743x_adapter *adapter);
void lan743x_ptp_update_latency(struct lan743x_adapter *adapter,
u32 link_speed);
-
-int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int lan743x_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int lan743x_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
#define LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS (4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 0af143ec0f86..427bdc0e4908 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -353,6 +353,11 @@ static void lan966x_ifh_set_rew_op(void *ifh, u64 rew_op)
lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD);
}
+static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type)
+{
+ lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE);
+}
+
static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
{
lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP);
@@ -380,6 +385,7 @@ static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
return err;
lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op);
+ lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type);
lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 1efa584e7107..1f9df67f0504 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -75,6 +75,10 @@
#define IFH_REW_OP_ONE_STEP_PTP 0x3
#define IFH_REW_OP_TWO_STEP_PTP 0x4
+#define IFH_PDU_TYPE_NONE 0
+#define IFH_PDU_TYPE_IPV4 7
+#define IFH_PDU_TYPE_IPV6 8
+
#define FDMA_RX_DCB_MAX_DBS 1
#define FDMA_TX_DCB_MAX_DBS 1
@@ -254,6 +258,7 @@ struct lan966x_phc {
struct lan966x_skb_cb {
u8 rew_op;
+ u8 pdu_type;
u16 ts_id;
unsigned long jiffies;
};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 63905bb5a63a..b4377b8613c3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -322,34 +322,55 @@ void lan966x_ptp_hwtstamp_get(struct lan966x_port *port,
*cfg = phc->hwtstamp_config;
}
-static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb)
+static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb,
+ u8 *rew_op, u8 *pdu_type)
{
struct ptp_header *header;
u8 msgtype;
int type;
- if (port->ptp_tx_cmd == IFH_REW_OP_NOOP)
- return IFH_REW_OP_NOOP;
+ if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
type = ptp_classify_raw(skb);
- if (type == PTP_CLASS_NONE)
- return IFH_REW_OP_NOOP;
+ if (type == PTP_CLASS_NONE) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
header = ptp_parse_header(skb, type);
- if (!header)
- return IFH_REW_OP_NOOP;
+ if (!header) {
+ *rew_op = IFH_REW_OP_NOOP;
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ return;
+ }
- if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP)
- return IFH_REW_OP_TWO_STEP_PTP;
+ if (type & PTP_CLASS_L2)
+ *pdu_type = IFH_PDU_TYPE_NONE;
+ if (type & PTP_CLASS_IPV4)
+ *pdu_type = IFH_PDU_TYPE_IPV4;
+ if (type & PTP_CLASS_IPV6)
+ *pdu_type = IFH_PDU_TYPE_IPV6;
+
+ if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
+ return;
+ }
/* If it is sync and run 1 step then set the correct operation,
* otherwise run as 2 step
*/
msgtype = ptp_get_msgtype(header, type);
- if ((msgtype & 0xf) == 0)
- return IFH_REW_OP_ONE_STEP_PTP;
+ if ((msgtype & 0xf) == 0) {
+ *rew_op = IFH_REW_OP_ONE_STEP_PTP;
+ return;
+ }
- return IFH_REW_OP_TWO_STEP_PTP;
+ *rew_op = IFH_REW_OP_TWO_STEP_PTP;
}
static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port)
@@ -374,10 +395,12 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port,
{
struct lan966x *lan966x = port->lan966x;
unsigned long flags;
+ u8 pdu_type;
u8 rew_op;
- rew_op = lan966x_ptp_classify(port, skb);
+ lan966x_ptp_classify(port, skb, &rew_op, &pdu_type);
LAN966X_SKB_CB(skb)->rew_op = rew_op;
+ LAN966X_SKB_CB(skb)->pdu_type = pdu_type;
if (rew_op != IFH_REW_OP_TWO_STEP_PTP)
return 0;
@@ -815,10 +838,6 @@ static int lan966x_ptp_perout(struct ptp_clock_info *ptp,
bool pps = false;
int pin;
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -917,12 +936,6 @@ static int lan966x_ptp_extts(struct ptp_clock_info *ptp,
if (lan966x->ptp_ext_irq <= 0)
return -EOPNOTSUPP;
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM)
return -EINVAL;
@@ -978,6 +991,10 @@ static struct ptp_clock_info lan966x_ptp_clock_info = {
.n_per_out = LAN966X_PHC_PINS_NUM,
.n_ext_ts = LAN966X_PHC_PINS_NUM,
.n_pins = LAN966X_PHC_PINS_NUM,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_STRICT_FLAGS,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
};
static int lan966x_ptp_phc_init(struct lan966x *lan966x,
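
lan966x_ptp_classify now reports two results through out-parameters so the transmit path can stamp both the rewriter op and the PDU type into the injection header. A condensed sketch of that control flow (flag values and names invented):

#include <stdio.h>

#define CLASS_NONE 0x0
#define CLASS_L2   0x1
#define CLASS_IPV4 0x2
#define CLASS_IPV6 0x4

enum pdu { PDU_NONE, PDU_IPV4, PDU_IPV6 };
enum rew { REW_NOOP, REW_ONE_STEP, REW_TWO_STEP };

/* Classify once, report both results; callers always get a defined
 * (rew_op, pdu_type) pair, even on the early-out paths.
 */
static void classify(int ptp_class, int two_step, int is_sync,
		     enum rew *rew_op, enum pdu *pdu_type)
{
	*rew_op = REW_NOOP;
	*pdu_type = PDU_NONE;

	if (ptp_class == CLASS_NONE)
		return;
	if (ptp_class & CLASS_IPV4)
		*pdu_type = PDU_IPV4;
	if (ptp_class & CLASS_IPV6)
		*pdu_type = PDU_IPV6;

	*rew_op = (two_step || !is_sync) ? REW_TWO_STEP : REW_ONE_STEP;
}

int main(void)
{
	enum rew r;
	enum pdu p;

	classify(CLASS_IPV4, 0, 1, &r, &p);
	printf("rew=%d pdu=%d\n", r, p);	/* one-step, IPv4 */
	return 0;
}
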
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 2bac6be8f6a0..9c58d9e0bbb5 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -921,7 +921,7 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
- u16 *max_num_vports)
+ u16 *max_num_vports, u8 *bm_hostmode)
{
struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_device_cfg_resp resp = {};
@@ -932,7 +932,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
sizeof(req), sizeof(resp));
- req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.proto_major_ver = proto_major_ver;
req.proto_minor_ver = proto_minor_ver;
@@ -956,11 +956,16 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
*max_num_vports = resp.max_num_vports;
- if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
gc->adapter_mtu = resp.adapter_mtu;
else
gc->adapter_mtu = ETH_FRAME_LEN;
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
+ *bm_hostmode = resp.bm_hostmode;
+ else
+ *bm_hostmode = 0;
+
debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
return 0;
@@ -2441,7 +2446,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
mana_destroy_txq(apc);
mana_uncfg_vport(apc);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_hw_vport(apc);
}
@@ -2453,7 +2458,7 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_hw_vport(apc);
if (err)
return err;
@@ -2689,7 +2694,7 @@ int mana_alloc_queues(struct net_device *ndev)
goto destroy_vport;
}
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_filter(apc);
if (err)
goto destroy_vport;
@@ -2751,7 +2756,7 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_filter(apc);
/* No packet can be transmitted now since apc->port_is_up is false.
@@ -2998,6 +3003,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
+ u8 bm_hostmode = 0;
u16 num_ports = 0;
int err;
int i;
@@ -3026,10 +3032,12 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
- MANA_MICRO_VERSION, &num_ports);
+ MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
if (err)
goto out;
+ ac->bm_hostmode = bm_hostmode;
+
if (!resuming) {
ac->num_ports = num_ports;
} else {
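
The bump to GDMA_MESSAGE_V3 follows the usual forward-compatible pattern: each new response field is consumed only when the firmware answered with a new enough message version, and falls back to a safe default otherwise. In miniature (versions and fields invented):

#include <stdint.h>
#include <stdio.h>

enum { MSG_V1 = 1, MSG_V2, MSG_V3 };

struct resp {
	uint32_t msg_version;	/* version the firmware answered with */
	uint32_t adapter_mtu;	/* valid from V2 */
	uint8_t bm_hostmode;	/* valid from V3 */
};

static void parse(const struct resp *r, uint32_t *mtu, uint8_t *bm)
{
	/* Each field falls back to a default on older firmware. */
	*mtu = r->msg_version >= MSG_V2 ? r->adapter_mtu : 1514;
	*bm = r->msg_version >= MSG_V3 ? r->bm_hostmode : 0;
}

int main(void)
{
	struct resp old = { MSG_V1, 0, 0 }, cur = { MSG_V3, 9000, 1 };
	uint32_t mtu;
	uint8_t bm;

	parse(&old, &mtu, &bm);
	printf("v1 fw: mtu=%u bm=%u\n", mtu, (unsigned)bm);	/* 1514 0 */
	parse(&cur, &mtu, &bm);
	printf("v3 fw: mtu=%u bm=%u\n", mtu, (unsigned)bm);	/* 9000 1 */
	return 0;
}
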
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 7663d196eaf8..469784d3a1a6 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -870,23 +870,30 @@ static int ocelot_set_features(struct net_device *dev,
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
+static int ocelot_port_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
- /* If the attached PHY device isn't capable of timestamping operations,
- * use our own (when possible).
- */
- if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return ocelot_hwstamp_set(ocelot, port, ifr);
- case SIOCGHWTSTAMP:
- return ocelot_hwstamp_get(ocelot, port, ifr);
- }
- }
+ ocelot_hwstamp_get(ocelot, port, cfg);
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return 0;
+}
+
+static int ocelot_port_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->port.index;
+
+ return ocelot_hwstamp_set(ocelot, port, cfg, extack);
}
static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
@@ -917,6 +924,8 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_eth_ioctl = ocelot_ioctl,
+ .ndo_hwtstamp_get = ocelot_port_hwtstamp_get,
+ .ndo_hwtstamp_set = ocelot_port_hwtstamp_set,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index cc1088988da0..88b5422cc2a0 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -211,11 +211,6 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
if (pin == 0)
@@ -519,47 +514,42 @@ static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd)
return 0;
}
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+void ocelot_hwstamp_get(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct hwtstamp_config cfg = {};
switch (ocelot_port->ptp_cmd) {
case IFH_REW_OP_TWO_STEP_PTP:
- cfg.tx_type = HWTSTAMP_TX_ON;
+ cfg->tx_type = HWTSTAMP_TX_ON;
break;
case IFH_REW_OP_ORIGIN_PTP:
- cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ cfg->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
break;
default:
- cfg.tx_type = HWTSTAMP_TX_OFF;
+ cfg->tx_type = HWTSTAMP_TX_OFF;
break;
}
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
}
EXPORT_SYMBOL(ocelot_hwstamp_get);
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- int ptp_cmd, old_ptp_cmd = ocelot_port->ptp_cmd;
bool l2 = false, l4 = false;
- struct hwtstamp_config cfg;
- bool old_l2, old_l4;
+ int ptp_cmd;
int err;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
/* Tx type sanity check */
- err = ocelot_ptp_tx_type_to_cmd(cfg.tx_type, &ptp_cmd);
+ err = ocelot_ptp_tx_type_to_cmd(cfg->tx_type, &ptp_cmd);
if (err)
return err;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -582,27 +572,15 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
return -ERANGE;
}
- old_l2 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L2;
- old_l4 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L4;
-
err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
if (err)
return err;
ocelot_port->ptp_cmd = ptp_cmd;
- cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
-
- if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) {
- err = -EFAULT;
- goto out_restore_ptp_traps;
- }
+ cfg->rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
return 0;
-out_restore_ptp_traps:
- ocelot_setup_ptp_traps(ocelot, port, old_l2, old_l4);
- ocelot_port->ptp_cmd = old_ptp_cmd;
- return err;
}
EXPORT_SYMBOL(ocelot_hwstamp_set);
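
Several drivers in this series can delete their open-coded flags & ~(...) rejects because ptp_clock_info now carries supported_extts_flags and supported_perout_flags, and the PTP core screens requests centrally. The core-side test reduces to a mask check, roughly as sketched here (not the core's exact code):

#include <stdio.h>

#define PTP_PEROUT_DUTY_CYCLE 0x1
#define PTP_PEROUT_PHASE      0x2

struct clock_info { unsigned int supported_perout_flags; };

/* Reject any request flag the driver did not declare support for. */
static int perout_flags_ok(const struct clock_info *ci, unsigned int req)
{
	return (req & ~ci->supported_perout_flags) == 0;
}

int main(void)
{
	struct clock_info ci = {
		.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
					  PTP_PEROUT_PHASE,
	};

	printf("%d\n", perout_flags_ok(&ci, PTP_PEROUT_PHASE));	/* 1 */
	printf("%d\n", perout_flags_ok(&ci, 0x8));			/* 0 */
	return 0;
}
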
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 055b55651a49..498eec8ae61d 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -108,6 +108,8 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.n_ext_ts = 0,
.n_per_out = OCELOT_PTP_PINS_NUM,
.n_pins = OCELOT_PTP_PINS_NUM,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE,
.pps = 0,
.gettime64 = ocelot_ptp_gettime64,
.settime64 = ocelot_ptp_settime64,
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 05606692e631..dd279788cf9e 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -846,7 +846,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- i = pci_request_regions(pdev, DRV_NAME);
+ i = pcim_request_all_regions(pdev, DRV_NAME);
if (i)
goto err_pci_request_regions;
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 671af5d4c5d2..9e7c285eaa6b 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -266,17 +266,17 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
}
}
-static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+static int nfp_net_xfrm_add_state(struct net_device *dev,
+ struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.real_dev;
struct nfp_ipsec_cfg_mssg msg = {};
int i, key_len, trunc_len, err = 0;
struct nfp_ipsec_cfg_add_sa *cfg;
struct nfp_net *nn;
unsigned int saidx;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
cfg = &msg.cfg_add_sa;
/* General */
@@ -546,17 +546,16 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
return 0;
}
-static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+static void nfp_net_xfrm_del_state(struct net_device *dev, struct xfrm_state *x)
{
struct nfp_ipsec_cfg_mssg msg = {
.cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
.sa_idx = x->xso.offload_handle - 1,
};
- struct net_device *netdev = x->xso.real_dev;
struct nfp_net *nn;
int err;
- nn = netdev_priv(netdev);
+ nn = netdev_priv(dev);
err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
sizeof(msg), nfp_net_ipsec_cfg);
if (err)
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index f1c6c47564b1..08086eb76996 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -779,7 +779,7 @@ nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ebeb6ab4465c..ab3cd06ed63e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -779,7 +779,7 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
- (__force __wsum)__get_unaligned_cpu32(data);
+ (__force __wsum)get_unaligned((u32 *)data);
data += 4;
break;
case NFP_NET_META_RESYNC_INFO:
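
Both NFP datapaths replace the removed __get_unaligned_cpu32() with the generic get_unaligned(), which is typically implemented as a memcpy the compiler folds into a plain load where alignment allows. A portable user-space equivalent (assuming a native-endian load, as in the driver):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Safe unaligned 32-bit load: memcpy never requires alignment, and
 * compilers turn this into a single load where that is legal.
 */
static uint32_t load_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };

	/* buf + 1 is deliberately misaligned. */
	printf("0x%08x\n", load_u32(buf + 1));	/* 0x12345678 on LE */
	return 0;
}
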
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 95514fabadf2..28997ddab966 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2538,7 +2538,7 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
nn->dp.num_r_vecs, num_online_cpus());
nn->max_r_vecs = nn->dp.num_r_vecs;
- nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
+ nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(*nn->dp.xsk_pools),
GFP_KERNEL);
if (!nn->dp.xsk_pools) {
err = -ENOMEM;
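
The kcalloc fix swaps sizeof(ptr) for sizeof(*ptr). Here the two happen to coincide — both are pointer-sized, since xsk_pools is an array of pointers — but the starred form stays correct if the element type ever changes. The general hazard, shown with calloc:

#include <stdio.h>
#include <stdlib.h>

struct pool { char name[64]; };

int main(void)
{
	size_t n = 4;
	struct pool *arr;

	/* Correct: element size comes from what the pointer points at.
	 * sizeof(arr) would give the pointer size (8 bytes) instead of
	 * sizeof(struct pool) (64 bytes) and silently under-allocate.
	 */
	arr = calloc(n, sizeof(*arr));
	if (!arr)
		return 1;

	printf("elem=%zu total=%zu\n", sizeof(*arr), n * sizeof(*arr));
	free(arr);
	return 0;
}
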
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index a2d4336d2766..92f30ff2d631 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -948,64 +948,20 @@ static int ionic_get_tunable(struct net_device *netdev,
return 0;
}
-static int ionic_get_module_info(struct net_device *netdev,
- struct ethtool_modinfo *modinfo)
-
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- struct sfp_eeprom_base *sfp;
-
- xcvr = &idev->port_info->status.xcvr;
- sfp = (struct sfp_eeprom_base *) xcvr->sprom;
-
- /* report the module data type and length */
- switch (sfp->phys_id) {
- case SFF8024_ID_SFP:
- modinfo->type = ETH_MODULE_SFF_8079;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- case SFF8024_ID_QSFP_8436_8636:
- case SFF8024_ID_QSFP28_8636:
- case SFF8024_ID_QSFP_PLUS_CMIS:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- break;
- default:
- netdev_info(netdev, "unknown xcvr type 0x%02x\n",
- xcvr->sprom[0]);
- modinfo->type = 0;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- break;
- }
-
- return 0;
-}
-
-static int ionic_get_module_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *ee,
- u8 *data)
+static int ionic_do_module_copy(u8 *dst, u8 *src, u32 len)
{
- struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_xcvr_status *xcvr;
- char tbuf[sizeof(xcvr->sprom)];
+ char tbuf[sizeof_field(struct ionic_xcvr_status, sprom)];
int count = 10;
- u32 len;
/* The NIC keeps the module prom up-to-date in the DMA space
* so we can simply copy the module bytes into the data buffer.
*/
- xcvr = &idev->port_info->status.xcvr;
- len = min_t(u32, sizeof(xcvr->sprom), ee->len);
-
do {
- memcpy(data, &xcvr->sprom[ee->offset], len);
- memcpy(tbuf, &xcvr->sprom[ee->offset], len);
+ memcpy(dst, src, len);
+ memcpy(tbuf, src, len);
/* Let's make sure we got a consistent copy */
- if (!memcmp(data, tbuf, len))
+ if (!memcmp(dst, tbuf, len))
break;
} while (--count);
@@ -1016,6 +972,48 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static int ionic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev = &lif->ionic->idev;
+ int err;
+ u8 *src;
+
+ if (!page_data->length)
+ return -EINVAL;
+
+ if (page_data->bank != 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Only bank 0 is supported");
+ return -EINVAL;
+ }
+
+ switch (page_data->page) {
+ case 0:
+ src = &idev->port_info->status.xcvr.sprom[page_data->offset];
+ break;
+ case 1:
+ src = &idev->port_info->sprom_page1[page_data->offset - 128];
+ break;
+ case 2:
+ src = &idev->port_info->sprom_page2[page_data->offset - 128];
+ break;
+ case 17:
+ src = &idev->port_info->sprom_page17[page_data->offset - 128];
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ memset(page_data->data, 0, page_data->length);
+ err = ionic_do_module_copy(page_data->data, src, page_data->length);
+ if (err)
+ return err;
+
+ return page_data->length;
+}
+
static int ionic_get_ts_info(struct net_device *netdev,
struct kernel_ethtool_ts_info *info)
{
@@ -1161,8 +1159,7 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.set_rxfh = ionic_set_rxfh,
.get_tunable = ionic_get_tunable,
.set_tunable = ionic_set_tunable,
- .get_module_info = ionic_get_module_info,
- .get_module_eeprom = ionic_get_module_eeprom,
+ .get_module_eeprom_by_page = ionic_get_module_eeprom_by_page,
.get_pauseparam = ionic_get_pauseparam,
.set_pauseparam = ionic_set_pauseparam,
.get_fecparam = ionic_get_fecparam,
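
ionic_do_module_copy factors out the existing trick for snapshotting a buffer the NIC may rewrite via DMA mid-copy: copy twice and retry until two consecutive copies agree. Stand-alone, the loop looks like this sketch (bounce buffer and retry budget mirroring the code above):

#include <stdio.h>
#include <string.h>

#define PROM_LEN 256

/* Copy src into dst, retrying until two back-to-back copies match, so
 * a concurrent writer cannot leave us with a torn snapshot. Returns 0
 * on success, -1 if the buffer never settles.
 */
static int stable_copy(unsigned char *dst, const unsigned char *src,
		       size_t len)
{
	unsigned char tbuf[PROM_LEN];
	int count = 10;

	if (len > sizeof(tbuf))
		return -1;

	do {
		memcpy(dst, src, len);
		memcpy(tbuf, src, len);
		if (!memcmp(dst, tbuf, len))
			return 0;
	} while (--count);

	return -1;
}

int main(void)
{
	unsigned char prom[PROM_LEN] = "static contents";
	unsigned char out[PROM_LEN];

	printf("%d\n", stable_copy(out, prom, sizeof(prom)));	/* 0 */
	return 0;
}
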
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 830c8adbfbee..f1ddbe9994a3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -2839,6 +2839,10 @@ union ionic_port_identity {
* @status: Port status data
* @stats: Port statistics data
* @mgmt_stats: Port management statistics data
+ * @sprom_epage: Extended Transceiver sprom
+ * @sprom_page1: Extended Transceiver sprom, page 1
+ * @sprom_page2: Extended Transceiver sprom, page 2
+ * @sprom_page17: Extended Transceiver sprom, page 17
* @rsvd: reserved byte(s)
* @pb_stats: uplink pb drop stats
*/
@@ -2849,8 +2853,17 @@ struct ionic_port_info {
struct ionic_port_stats stats;
struct ionic_mgmt_port_stats mgmt_stats;
};
- /* room for pb_stats to start at 2k offset */
- u8 rsvd[760];
+ union {
+ u8 sprom_epage[384];
+ struct {
+ u8 sprom_page1[128];
+ u8 sprom_page2[128];
+ u8 sprom_page17[128];
+ };
+ };
+ u8 rsvd[376];
+
+ /* pb_stats must start at 2k offset */
struct ionic_port_pb_stats pb_stats;
};
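
The reshuffled ionic_port_info carves three 128-byte sprom pages out of the old reserved area while keeping pb_stats at its 2 KiB offset; that comment is really a layout invariant, and invariants of this kind can be enforced at compile time. A sketch with simplified, invented field sizes:

#include <stddef.h>
#include <stdio.h>

struct port_info {
	char head[1280];	/* id/config/status/stats, collapsed */
	union {
		char sprom_epage[384];
		struct {
			char sprom_page1[128];
			char sprom_page2[128];
			char sprom_page17[128];
		};
	};
	char rsvd[384];
	char pb_stats[64];	/* must start at the 2 KiB mark */
};

/* Fail the build, not the device, if someone resizes a field. */
_Static_assert(offsetof(struct port_info, pb_stats) == 2048,
	       "pb_stats moved away from its 2k offset");

int main(void)
{
	printf("pb_stats at %zu\n", offsetof(struct port_info, pb_stats));
	return 0;
}
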
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b7def3b54937..016b575861b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -939,7 +939,6 @@ u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
index f6cd1b3efdfd..27e91d0d39f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -1305,37 +1305,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
char *results_buf);
/**
- * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MVP trace dump buffer, starting from the header.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
- *
- * @p_hwfn: HW device data.
- * @dump_buf: MCP trace dump buffer, starting from the header.
- * @num_dumped_bytes: Number of bytes that were dumped.
- * @results_buf: Buffer for printing the mcp trace results.
- *
- * Return: Error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
* qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
* Should be called after continuous MCP Trace parsing.
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 464a72afb758..9c3d3dd2f847 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -7614,31 +7614,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
results_buf, &parsed_buf_size, true);
}
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf)
-{
- u32 parsed_buf_size;
-
- return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
- &parsed_buf_size, false);
-}
-
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf)
-{
- u32 parsed_results_bytes;
-
- return qed_parse_mcp_trace_buf(p_hwfn,
- dump_buf,
- num_dumped_bytes,
- 0,
- num_dumped_bytes,
- results_buf, &parsed_results_bytes);
-}
-
/* Frees the specified MCP Trace meta data */
void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 86a93cac2647..9659ce5b0712 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -255,25 +255,6 @@ static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
p_hwfn->db_recovery_info.db_recovery_counter = 0;
}
-/* Print the content of the doorbell recovery mechanism */
-void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
-{
- struct qed_db_recovery_entry *db_entry = NULL;
-
- DP_NOTICE(p_hwfn,
- "Displaying doorbell recovery database. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
-
- /* Protect the list */
- spin_lock_bh(&p_hwfn->db_recovery_info.lock);
- list_for_each_entry(db_entry,
- &p_hwfn->db_recovery_info.list, list_entry) {
- qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
- }
-
- spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
-}
-
/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
struct qed_db_recovery_entry *db_entry)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index ed1a84542ad2..10e355397cee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -2666,58 +2666,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
-/**
- * qed_calc_session_ctx_validation(): Calcualte validation byte for
- * session context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @cid: Context cid.
- *
- * Return: Void.
- */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid);
-
-/**
- * qed_calc_task_ctx_validation(): Calcualte validation byte for task
- * context.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Context size.
- * @ctx_type: Context type.
- * @tid: Context tid.
- *
- * Return: Void.
- */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid);
-
-/**
- * qed_memset_session_ctx(): Memset session context to 0 while
- * preserving validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: Size to initialzie.
- * @ctx_type: Context type.
- *
- * Return: Void.
- */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
-/**
- * qed_memset_task_ctx(): Memset task context to 0 while preserving
- * validation bytes.
- *
- * @p_ctx_mem: Pointer to context memory.
- * @ctx_size: size to initialzie.
- * @ctx_type: context type.
- *
- * Return: Void.
- */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
-
#define NUM_STORMS 6
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 9e5f0dbc8a07..9907973399dc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -69,17 +69,6 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
return 0;
}
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
-{
- struct qed_ptt *p_ptt;
- int i;
-
- for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
- p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
- p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
- }
-}
-
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_ptt_pool);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e535983ce21b..3c98f58a184f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -62,15 +62,6 @@ enum _dmae_cmd_crc_mask {
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * qed_ptt_invalidate(): Forces all ptt entries to be re-configured
- *
- * @p_hwfn: HW device data.
- *
- * Return: Void.
- */
-void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
-
-/**
* qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
* @p_hwfn: HW device data.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 407029a36fa1..aa20bb8caa9a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -18,16 +18,6 @@
#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
- {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
-};
-
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
- {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
-};
-
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
@@ -1576,134 +1566,6 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
-DECLARE_CRC8_TABLE(cdu_crc8_table);
-
-/* Calculate and return CDU validation byte per connection type/region/cid */
-static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
-{
- const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
- u8 crc, validation_byte = 0;
- static u8 crc8_table_valid; /* automatically initialized to 0 */
- u32 validation_string = 0;
- __be32 data_to_crc;
-
- if (!crc8_table_valid) {
- crc8_populate_msb(cdu_crc8_table, 0x07);
- crc8_table_valid = 1;
- }
-
- /* The CRC is calculated on the String-to-compress:
- * [31:8] = {CID[31:20],CID[11:0]}
- * [7:4] = Region
- * [3:0] = Type
- */
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
- validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
- validation_string |= ((region & 0xF) << 4);
-
- if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
- validation_string |= (conn_type & 0xF);
-
- /* Convert to big-endian and calculate CRC8 */
- data_to_crc = cpu_to_be32(validation_string);
- crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
- CRC8_INIT_VALUE);
-
- /* The validation byte [7:0] is composed:
- * for type A validation
- * [7] = active configuration bit
- * [6:0] = crc[6:0]
- *
- * for type B validation
- * [7] = active configuration bit
- * [6:3] = connection_type[3:0]
- * [2:0] = crc[2:0]
- */
- validation_byte |=
- ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
-
- if ((validation_cfg >>
- CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
- validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
- else
- validation_byte |= crc & 0x7F;
-
- return validation_byte;
-}
-
-/* Calcualte and set validation bytes for session context */
-void qed_calc_session_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
-}
-
-/* Calcualte and set validation bytes for task context */
-void qed_calc_task_ctx_validation(void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
-{
- u8 *p_ctx, *region1_val_ptr;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
-}
-
-/* Memset session context to 0 while preserving validation bytes */
-void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
- u8 x_val, t_val, u_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
- t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
- u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
-
- x_val = *x_val_ptr;
- t_val = *t_val_ptr;
- u_val = *u_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *x_val_ptr = x_val;
- *t_val_ptr = t_val;
- *u_val_ptr = u_val;
-}
-
-/* Memset task context to 0 while preserving validation bytes */
-void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
-{
- u8 *p_ctx, *region1_val_ptr;
- u8 region1_val;
-
- p_ctx = (u8 * const)p_ctx_mem;
- region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
-
- region1_val = *region1_val_ptr;
-
- memset(p_ctx, 0, ctx_size);
-
- *region1_val_ptr = region1_val;
-}
-
/* Enable and configure context validation */
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 99df00c30b8c..b5d744d2586f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
};
static struct qed_eth_cb_ops qede_ll_ops = {
- {
+ .common = {
#ifdef CONFIG_RFS_ACCEL
.arfs_filter_op = qede_arfs_filter_op,
#endif
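
The qede change names the member being initialized (.common = { ... }) instead of relying on it being first in qed_eth_cb_ops. Designated initializers document intent and survive field reordering; in miniature:

#include <stdio.h>

struct common_ops { int (*probe)(void); };
struct eth_cb_ops { struct common_ops common; int flags; };

static int my_probe(void) { return 0; }

/* Naming the member survives reordering of eth_cb_ops' fields; a bare
 * { { ... } } silently depends on common staying first.
 */
static struct eth_cb_ops ops = {
	.common = {
		.probe = my_probe,
	},
};

int main(void)
{
	printf("%d\n", ops.common.probe());
	return 0;
}
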
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 28d24d59efb8..d57b976b9040 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1484,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
}
cmd_op = (cmd.rsp.arg[0] & 0xff);
- if (cmd.rsp.arg[0] >> 25 == 2)
- return 2;
+ if (cmd.rsp.arg[0] >> 25 == 2) {
+ ret = 2;
+ goto out;
+ }
+
if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
set_bit(QLC_BC_VF_STATE, &vf->state);
else
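
The qlcnic fix turns an early return 2 into ret = 2; goto out; so the function's single exit path still runs its cleanup. The general shape of the single-exit idiom (resources and names invented):

#include <stdio.h>
#include <stdlib.h>

static int channel_cfg(int fw_says_retry)
{
	void *mbx_args = malloc(64);	/* resource the exit path frees */
	int ret = 0;

	if (!mbx_args)
		return -1;

	if (fw_says_retry) {
		ret = 2;
		goto out;	/* a bare "return 2" here would leak mbx_args */
	}

	/* ... normal command processing ... */

out:
	free(mbx_args);
	return ret;
}

int main(void)
{
	printf("%d\n", channel_cfg(1));	/* 2, with mbx_args freed */
	return 0;
}
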
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9210ff360fdc..a4434eb38950 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -52,7 +52,6 @@ config QCOM_EMAC
depends on HAS_DMA && HAS_IOMEM
select CRC32
select PHYLIB
- select MDIO_DEVRES
help
This driver supports the Qualcomm Technologies, Inc. Gigabit
Ethernet Media Access Controller (EMAC). The controller
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 7a194a8ab989..2c1a0c21af8d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -64,16 +64,15 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_50 has been removed */
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
- RTL_GIGA_MAC_VER_53,
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_VER_64,
- RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_VER_66,
RTL_GIGA_MAC_VER_70,
- RTL_GIGA_MAC_VER_71,
- RTL_GIGA_MAC_NONE
+ RTL_GIGA_MAC_VER_80,
+ RTL_GIGA_MAC_NONE,
+ RTL_GIGA_MAC_VER_LAST = RTL_GIGA_MAC_NONE - 1
};
struct rtl8169_private;
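
Alongside retiring several mac_version entries, r8169.h pins RTL_GIGA_MAC_VER_LAST to the entry just before RTL_GIGA_MAC_NONE, so range checks track the enum automatically as versions come and go. The trick in isolation:

#include <stdio.h>

enum mac_version {
	VER_51,
	VER_52,
	VER_61,
	MAC_NONE,
	MAC_VER_LAST = MAC_NONE - 1	/* always the newest real entry */
};

static int version_valid(enum mac_version v)
{
	return v <= MAC_VER_LAST;	/* no literal to update on growth */
}

int main(void)
{
	printf("%d %d\n", version_valid(VER_61), version_valid(MAC_NONE));
	return 0;
}
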
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 4eebd9cb40a3..43170500d566 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -60,6 +60,7 @@
#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
+#define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -91,61 +92,117 @@
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
-static const struct {
+static const struct rtl_chip_info {
+ u16 mask;
+ u16 val;
+ enum mac_version mac_version;
const char *name;
const char *fw_name;
} rtl_chip_infos[] = {
- /* PCI devices. */
- [RTL_GIGA_MAC_VER_02] = {"RTL8169s" },
- [RTL_GIGA_MAC_VER_03] = {"RTL8110s" },
- [RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb" },
- [RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc" },
- [RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc" },
- /* PCI-E devices. */
- [RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
- [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
- [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
- [RTL_GIGA_MAC_VER_14] = {"RTL8401" },
- [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c" },
- [RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
- [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
- [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
- [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
- [RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp" },
- [RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e", FIRMWARE_8168E_1},
- [RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e", FIRMWARE_8168E_2},
- [RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl", FIRMWARE_8168E_3},
- [RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f", FIRMWARE_8168F_1},
- [RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f", FIRMWARE_8168F_2},
- [RTL_GIGA_MAC_VER_37] = {"RTL8402", FIRMWARE_8402_1 },
- [RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
- [RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
- [RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
- [RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
- [RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
- [RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
- [RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
- [RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
- [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
- [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
- [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
- [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
- /* reserve 62 for CFG_METHOD_4 in the vendor driver */
- [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
- [RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1},
- [RTL_GIGA_MAC_VER_65] = {"RTL8125D", FIRMWARE_8125D_2},
- [RTL_GIGA_MAC_VER_66] = {"RTL8125BP", FIRMWARE_8125BP_2},
- [RTL_GIGA_MAC_VER_70] = {"RTL8126A", FIRMWARE_8126A_2},
- [RTL_GIGA_MAC_VER_71] = {"RTL8126A", FIRMWARE_8126A_3},
+ /* 8127A family. */
+ { 0x7cf, 0x6c9, RTL_GIGA_MAC_VER_80, "RTL8127A", FIRMWARE_8127A_1 },
+
+ /* 8126A family. */
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_3 },
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70, "RTL8126A", FIRMWARE_8126A_2 },
+
+ /* 8125BP family. */
+ { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 },
+
+ /* 8125D family. */
+ { 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 },
+ { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 },
+
+ /* 8125B family. */
+ { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63, "RTL8125B", FIRMWARE_8125B_2 },
+
+ /* 8125A family. */
+ { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61, "RTL8125A", FIRMWARE_8125A_3 },
+
+ /* RTL8117 */
+ { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117" },
+ { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52, "RTL8168fp/RTL8117",
+ FIRMWARE_8168FP_3 },
+
+ /* 8168EP family. */
+ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51, "RTL8168ep/8111ep" },
+
+ /* 8168H family. */
+ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46, "RTL8168h/8111h",
+ FIRMWARE_8168H_2 },
+ /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
+ { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46, "RTL8168M", FIRMWARE_8168H_2 },
+
+ /* 8168G family. */
+ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44, "RTL8411b", FIRMWARE_8411_2 },
+ { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42, "RTL8168gu/8111gu",
+ FIRMWARE_8168G_3 },
+ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40, "RTL8168g/8111g",
+ FIRMWARE_8168G_2 },
+
+ /* 8168F family. */
+ { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38, "RTL8411", FIRMWARE_8411_1 },
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36, "RTL8168f/8111f",
+ FIRMWARE_8168F_2 },
+ { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35, "RTL8168f/8111f",
+ FIRMWARE_8168F_1 },
+
+ /* 8168E family. */
+ { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34, "RTL8168evl/8111evl",
+ FIRMWARE_8168E_3 },
+ { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32, "RTL8168e/8111e",
+ FIRMWARE_8168E_1 },
+ { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33, "RTL8168e/8111e",
+ FIRMWARE_8168E_2 },
+
+ /* 8168D family. */
+ { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25, "RTL8168d/8111d",
+ FIRMWARE_8168D_1 },
+ { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26, "RTL8168d/8111d",
+ FIRMWARE_8168D_2 },
+
+ /* 8168DP family. */
+ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28, "RTL8168dp/8111dp" },
+ { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31, "RTL8168dp/8111dp" },
+
+ /* 8168C family. */
+ { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18, "RTL8168cp/8111cp" },
+ { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24, "RTL8168cp/8111cp" },
+ { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20, "RTL8168c/8111c" },
+ { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21, "RTL8168c/8111c" },
+ { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22, "RTL8168c/8111c" },
+
+ /* 8168B family. */
+ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17, "RTL8168b/8111b" },
+ /* This one is very old and rare, support has been removed.
+ * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11, "RTL8168b/8111b" },
+ */
+
+ /* 8101 family. */
+ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39, "RTL8106e", FIRMWARE_8106E_1 },
+ { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37, "RTL8402", FIRMWARE_8402_1 },
+ { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30, "RTL8105e", FIRMWARE_8105E_1 },
+ { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08, "RTL8102e" },
+ { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07, "RTL8102e" },
+ { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14, "RTL8401" },
+ { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09, "RTL8102e/RTL8103e" },
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10, "RTL8101e/RTL8100e" },
+
+ /* 8110 family. */
+ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05, "RTL8169sc/8110sc" },
+ { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04, "RTL8169sb/8110sb" },
+ { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03, "RTL8110s" },
+ { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02, "RTL8169s" },
+
+ /* Catch-all */
+ { 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -169,8 +226,10 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
{ PCI_VDEVICE(REALTEK, 0x8126) },
+ { PCI_VDEVICE(REALTEK, 0x8127) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
{ PCI_VDEVICE(REALTEK, 0x5000) },
+ { PCI_VDEVICE(REALTEK, 0x0e10) },
{}
};
@@ -716,6 +775,7 @@ MODULE_FIRMWARE(FIRMWARE_8125D_2);
MODULE_FIRMWARE(FIRMWARE_8125BP_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
+MODULE_FIRMWARE(FIRMWARE_8127A_1);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -777,7 +837,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_53;
+ tp->mac_version <= RTL_GIGA_MAC_VER_52;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -945,9 +1005,7 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (type == ERIAR_OOB &&
- (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
- tp->mac_version == RTL_GIGA_MAC_VER_53))
+ if (type == ERIAR_OOB && tp->mac_version == RTL_GIGA_MAC_VER_52)
*cmd |= 0xf70 << 18;
}
@@ -1236,7 +1294,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1251,7 +1309,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1447,7 +1505,7 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return RTL_DASH_DP;
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
return RTL_DASH_EP;
case RTL_GIGA_MAC_VER_66:
return RTL_DASH_25_BP;
@@ -1603,7 +1661,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_LAST:
r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
@@ -2076,7 +2134,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2265,151 +2323,30 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_eth_ctrl_stats = rtl8169_get_eth_ctrl_stats,
};
-static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
+static const struct rtl_chip_info *rtl8169_get_chip_version(u16 xid, bool gmii)
{
- /*
- * The driver currently handles the 8168Bf and the 8168Be identically
- * but they can be identified more specifically through the test below
- * if needed:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
- *
- * Same thing for the 8101Eb and the 8101Ec:
- *
- * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
- */
- static const struct rtl_mac_info {
- u16 mask;
- u16 val;
- enum mac_version ver;
- } mac_info[] = {
- /* 8126A family. */
- { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_71 },
- { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70 },
-
- /* 8125BP family. */
- { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66 },
-
- /* 8125D family. */
- { 0x7cf, 0x689, RTL_GIGA_MAC_VER_65 },
- { 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 },
-
- /* 8125B family. */
- { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
-
- /* 8125A family. */
- { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 },
- /* It seems only XID 609 made it to the mass market.
- * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
- * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
- */
-
- /* RTL8117 */
- { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
- { 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
-
- /* 8168EP family. */
- { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
- * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
- */
-
- /* 8168H family. */
- { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
- */
- /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
- { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 },
-
- /* 8168G family. */
- { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
- { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
- /* It seems this chip version never made it to
- * the wild. Let's disable detection.
- * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
- */
- { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
-
- /* 8168F family. */
- { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
- { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
-
- /* 8168E family. */
- { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
- { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
- { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
-
- /* 8168D family. */
- { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
- { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
-
- /* 8168DP family. */
- /* It seems this early RTL8168dp version never made it to
- * the wild. Support has been removed.
- * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
- */
- { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
- { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
-
- /* 8168C family. */
- { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
- { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
- { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
- { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
- { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
- { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
- { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
-
- /* 8168B family. */
- { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- /* This one is very old and rare, support has been removed.
- * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
- */
-
- /* 8101 family. */
- { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
- { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
- { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
- { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
- { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
- { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
- { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 },
-
- /* 8110 family. */
- { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
- { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
- { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
- { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
- { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
-
- /* Catch-all */
- { 0x000, 0x000, RTL_GIGA_MAC_NONE }
+ /* Chips combining a 1Gbps MAC with a 100Mbps PHY */
+ static const struct rtl_chip_info rtl8106eus_info = {
+ .mac_version = RTL_GIGA_MAC_VER_43,
+ .name = "RTL8106eus",
+ .fw_name = FIRMWARE_8106E_2,
};
- const struct rtl_mac_info *p = mac_info;
- enum mac_version ver;
+ static const struct rtl_chip_info rtl8107e_info = {
+ .mac_version = RTL_GIGA_MAC_VER_48,
+ .name = "RTL8107e",
+ .fw_name = FIRMWARE_8107E_2,
+ };
+ const struct rtl_chip_info *p = rtl_chip_infos;
while ((xid & p->mask) != p->val)
p++;
- ver = p->ver;
- if (ver != RTL_GIGA_MAC_NONE && !gmii) {
- if (ver == RTL_GIGA_MAC_VER_42)
- ver = RTL_GIGA_MAC_VER_43;
- else if (ver == RTL_GIGA_MAC_VER_46)
- ver = RTL_GIGA_MAC_VER_48;
- }
+ if (p->mac_version == RTL_GIGA_MAC_VER_42 && !gmii)
+ return &rtl8106eus_info;
+ if (p->mac_version == RTL_GIGA_MAC_VER_46 && !gmii)
+ return &rtl8107e_info;
- return ver;
+ return p;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
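The rewritten lookup walks rtl_chip_infos directly: an entry matches when (xid & mask) == val, and the all-zero terminator matches any XID, so the loop needs no explicit length check. A compact sketch of the same first-match scheme with made-up entries:

struct chip_match {
	unsigned short mask, val;
	const char *name;
};

/* First match wins, so narrower masks must precede broader ones;
 * the all-zero entry is the unconditional catch-all. */
static const struct chip_match table[] = {
	{ 0x7cf, 0x641, "chip-X rev 1" },	/* exact XID match */
	{ 0x7c8, 0x640, "chip-X family" },	/* looser family match */
	{ 0x000, 0x000, "unknown" },		/* terminator */
};

static const char *chip_name(unsigned short xid)
{
	const struct chip_match *p = table;

	while ((xid & p->mask) != p->val)	/* same loop shape as above */
		p++;
	return p->name;
}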
@@ -2553,13 +2490,13 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2684,14 +2621,14 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2852,10 +2789,23 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_csi_mod(struct rtl8169_private *tp, int addr,
+ u32 mask, u32 set)
+{
+ u32 val;
+
+ WARN(addr % 4, "Invalid CSI address %#x\n", addr);
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+
+ val = rtl_csi_read(tp, addr);
+ rtl_csi_write(tp, addr, (val & ~mask) | set);
+}
+
static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
- u32 csi;
int rc;
u8 val;
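rtl_csi_mod() folds the read/mask/or/write sequence its two callers used to open-code into one helper; (val & ~mask) | set clears exactly the field selected by mask before writing the new bits. A tiny self-checking sketch of that read-modify-write arithmetic:

#include <assert.h>
#include <stdint.h>

static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t set)
{
	return (reg & ~mask) | set;	/* clear field, then set new bits */
}

int main(void)
{
	/* mirrors rtl_csi_mod(tp, 0x070c, 0xff000000, val << 24):
	 * only the top byte of the register changes */
	assert(rmw(0x12345678, 0xff000000, (uint32_t)0xab << 24) ==
	       0xab345678);
	return 0;
}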
@@ -2872,16 +2822,12 @@ static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
}
}
- netdev_notice_once(tp->dev,
- "No native access to PCI extended config space, falling back to CSI\n");
- csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
- rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
+ rtl_csi_mod(tp, RTL_GEN3_RELATED_OFF, RTL_GEN3_ZRXDC_NONCOMPL, 0);
}
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
- u32 csi;
/* According to Realtek the value at config space address 0x070f
* controls the L0s/L1 entrance latency. We try standard ECAM access
@@ -2893,10 +2839,7 @@ static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
return;
- netdev_notice_once(tp->dev,
- "No native access to PCI extended config space, falling back to CSI\n");
- csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
- rtl_csi_write(tp, 0x070c, csi | val << 24);
+ rtl_csi_mod(tp, 0x070c, 0xff000000, val << 24);
}
static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
@@ -2960,7 +2903,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2974,7 +2917,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -3001,7 +2944,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_70:
- case RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3012,7 +2955,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -3024,7 +2967,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -3033,7 +2976,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_70:
- case RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_80:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3754,11 +3697,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
- tp->mac_version == RTL_GIGA_MAC_VER_71)
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
- tp->mac_version == RTL_GIGA_MAC_VER_71)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0f00, 0x0f00);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_70)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3777,7 +3721,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
- tp->mac_version == RTL_GIGA_MAC_VER_71)
+ tp->mac_version == RTL_GIGA_MAC_VER_80)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3855,6 +3799,12 @@ static void rtl_hw_start_8126a(struct rtl8169_private *tp)
rtl_hw_start_8125_common(tp);
}
+static void rtl_hw_start_8127a(struct rtl8169_private *tp)
+{
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_config(struct rtl8169_private *tp)
{
static const rtl_generic_fct hw_configs[] = {
@@ -3893,14 +3843,12 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
- [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
- [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8125d,
[RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d,
[RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a,
- [RTL_GIGA_MAC_VER_71] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_80] = rtl_hw_start_8127a,
};
if (hw_configs[tp->mac_version])
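rtl_hw_config() keeps dispatching through a sparse array of function pointers indexed by mac_version; removing the VER_53/VER_65/VER_71 rows simply leaves those slots NULL, which the guard above already tolerates. A minimal sketch of the idiom with hypothetical versions:

#include <stdio.h>

typedef void (*cfg_fn)(void);

static void cfg_a(void) { puts("config A"); }
static void cfg_b(void) { puts("config B"); }

enum ver { VER_A, VER_B, VER_GAP, VER_COUNT };

/* Sparse dispatch table: versions without an entry stay NULL. */
static const cfg_fn hw_cfgs[VER_COUNT] = {
	[VER_A] = cfg_a,
	[VER_B] = cfg_b,
	/* [VER_GAP] deliberately absent */
};

static void run_cfg(enum ver v)
{
	if (hw_cfgs[v])		/* same NULL guard as rtl_hw_config() */
		hw_cfgs[v]();
}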
@@ -3917,14 +3865,15 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_64:
- case RTL_GIGA_MAC_VER_65:
case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_80:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_80)
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
break;
case RTL_GIGA_MAC_VER_63:
case RTL_GIGA_MAC_VER_70:
- case RTL_GIGA_MAC_VER_71:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4156,7 +4105,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_LAST:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4313,7 +4262,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -5101,10 +5050,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
- pci_wake_from_d3(pdev, tp->saved_wolopts);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled)
+ pci_prepare_to_sleep(pdev);
}
static void rtl_remove_one(struct pci_dev *pdev)
@@ -5358,13 +5305,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
rtl_hw_init_8125(tp);
break;
default:
@@ -5389,7 +5336,7 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
/* RTL8125/8126 */
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_LAST:
return JUMBO_16K;
default:
return JUMBO_9K;
@@ -5434,9 +5381,9 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct rtl_chip_info *chip;
struct rtl8169_private *tp;
int jumbo_max, region, rc;
- enum mac_version chipset;
struct net_device *dev;
u32 txconfig;
u16 xid;
@@ -5486,12 +5433,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
xid = (txconfig >> 20) & 0xfcf;
/* Identify chip attached to board */
- chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
- if (chipset == RTL_GIGA_MAC_NONE)
+ chip = rtl8169_get_chip_version(xid, tp->supports_gmii);
+ if (chip->mac_version == RTL_GIGA_MAC_NONE)
return dev_err_probe(&pdev->dev, -ENODEV,
"unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
xid);
- tp->mac_version = chipset;
+ tp->mac_version = chip->mac_version;
+ tp->fw_name = chip->fw_name;
/* Disable ASPM L1 as that causes random device-stop-working
* problems as well as full system hangs for some PCIe device users.

@@ -5596,8 +5544,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_set_irq_mask(tp);
- tp->fw_name = rtl_chip_infos[chipset].fw_name;
-
tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
&tp->counters_phys_addr,
GFP_KERNEL);
@@ -5622,7 +5568,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
+ chip->name, dev->dev_addr, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index cf95e579c65d..032d9d2cfa2a 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -50,6 +50,15 @@ static void r8168g_phy_param(struct phy_device *phydev, u16 parm,
phy_restore_page(phydev, oldpage, 0);
}
+static void rtl8125_phy_param(struct phy_device *phydev, u16 parm,
+ u16 mask, u16 val)
+{
+ phy_lock_mdio_bus(phydev);
+ __phy_write_mmd(phydev, MDIO_MMD_VEND2, 0xb87c, parm);
+ __phy_modify_mmd(phydev, MDIO_MMD_VEND2, 0xb87e, mask, val);
+ phy_unlock_mdio_bus(phydev);
+}
+
struct phy_reg {
u16 reg;
u16 val;
@@ -1004,12 +1013,8 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80a2);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x16, 0x809c);
- phy_write(phydev, 0x17, 0x0153);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80a2, 0xffff, 0x0153);
+ rtl8125_phy_param(phydev, 0x809c, 0xffff, 0x0153);
phy_write(phydev, 0x1f, 0x0a43);
phy_write(phydev, 0x13, 0x81B3);
@@ -1061,14 +1066,9 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090);
phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x80f5);
- phy_write(phydev, 0x17, 0x760e);
- phy_write(phydev, 0x16, 0x8107);
- phy_write(phydev, 0x17, 0x360e);
- phy_write(phydev, 0x16, 0x8551);
- phy_modify(phydev, 0x17, 0xff00, 0x0800);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x80f5, 0xffff, 0x760e);
+ rtl8125_phy_param(phydev, 0x8107, 0xffff, 0x360e);
+ rtl8125_phy_param(phydev, 0x8551, 0xff00, 0x0800);
phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000);
phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300);
@@ -1110,12 +1110,8 @@ static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp,
r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000);
- phy_write(phydev, 0x1f, 0x0b87);
- phy_write(phydev, 0x16, 0x8088);
- phy_modify(phydev, 0x17, 0xff00, 0x9000);
- phy_write(phydev, 0x16, 0x808f);
- phy_modify(phydev, 0x17, 0xff00, 0x9000);
- phy_write(phydev, 0x1f, 0x0000);
+ rtl8125_phy_param(phydev, 0x8088, 0xff00, 0x9000);
+ rtl8125_phy_param(phydev, 0x808f, 0xff00, 0x9000);
r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800);
@@ -1134,6 +1130,171 @@ static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
rtl8125_common_config_eee_phy(phydev);
}
+static void rtl8127a_1_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8415, 0xff00, 0x9300);
+ r8168g_phy_param(phydev, 0x81a3, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81ae, 0xff00, 0x0f00);
+ r8168g_phy_param(phydev, 0x81b9, 0xff00, 0xb900);
+ rtl8125_phy_param(phydev, 0x83b0, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83C5, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83da, 0x0e00, 0x0000);
+ rtl8125_phy_param(phydev, 0x83ef, 0x0e00, 0x0000);
+ phy_modify_paged(phydev, 0x0bf3, 0x14, 0x01f0, 0x0160);
+ phy_modify_paged(phydev, 0x0bf3, 0x15, 0x001f, 0x0014);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x6000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x16, 0xc000, 0x0000);
+ phy_modify_paged(phydev, 0x0bf2, 0x14, 0x1fff, 0x0187);
+ phy_modify_paged(phydev, 0x0bf2, 0x15, 0x003f, 0x0003);
+
+ r8168g_phy_param(phydev, 0x8173, 0xffff, 0x8620);
+ r8168g_phy_param(phydev, 0x8175, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x817c, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8187, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x8192, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x819d, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x81a8, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81b3, 0x2000, 0x0000);
+ r8168g_phy_param(phydev, 0x81be, 0x0000, 0x2000);
+ r8168g_phy_param(phydev, 0x817d, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8188, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x8193, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x819e, 0xff00, 0xa600);
+ r8168g_phy_param(phydev, 0x81a9, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81b4, 0xff00, 0x1400);
+ r8168g_phy_param(phydev, 0x81bf, 0xff00, 0xa600);
+
+ phy_modify_paged(phydev, 0x0aea, 0x15, 0x0028, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f0, 0xffff, 0x201c);
+ rtl8125_phy_param(phydev, 0x84f2, 0xffff, 0x3117);
+
+ phy_write_paged(phydev, 0x0aec, 0x13, 0x0000);
+ phy_write_paged(phydev, 0x0ae2, 0x10, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x17, 0xffff);
+ phy_write_paged(phydev, 0x0aed, 0x11, 0xffff);
+ phy_write_paged(phydev, 0x0aec, 0x14, 0x0000);
+ phy_modify_paged(phydev, 0x0aed, 0x10, 0x0001, 0x0000);
+ phy_write_paged(phydev, 0x0adb, 0x14, 0x0150);
+ rtl8125_phy_param(phydev, 0x8197, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x8231, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cb, 0xff00, 0x5000);
+ rtl8125_phy_param(phydev, 0x82cd, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8233, 0xff00, 0x5700);
+ rtl8125_phy_param(phydev, 0x8199, 0xff00, 0x5700);
+
+ rtl8125_phy_param(phydev, 0x815a, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81f4, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x828e, 0xffff, 0x0150);
+ rtl8125_phy_param(phydev, 0x81b1, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x824b, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x82e5, 0xffff, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x84f7, 0xff00, 0x2800);
+ phy_modify_paged(phydev, 0x0aec, 0x11, 0x0000, 0x1000);
+ rtl8125_phy_param(phydev, 0x81b3, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x824d, 0xff00, 0xad00);
+ rtl8125_phy_param(phydev, 0x82e7, 0xff00, 0xad00);
+ phy_modify_paged(phydev, 0x0ae4, 0x17, 0x000f, 0x0001);
+ rtl8125_phy_param(phydev, 0x82ce, 0xf000, 0x4000);
+
+ rtl8125_phy_param(phydev, 0x84ac, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84ae, 0xffff, 0x0000);
+ rtl8125_phy_param(phydev, 0x84b0, 0xffff, 0xf818);
+ rtl8125_phy_param(phydev, 0x84b2, 0xff00, 0x6000);
+
+ rtl8125_phy_param(phydev, 0x8ffc, 0xffff, 0x6008);
+ rtl8125_phy_param(phydev, 0x8ffe, 0xffff, 0xf450);
+
+ rtl8125_phy_param(phydev, 0x8015, 0x0000, 0x0200);
+ rtl8125_phy_param(phydev, 0x8016, 0x0800, 0x0000);
+ rtl8125_phy_param(phydev, 0x8fe6, 0xff00, 0x0800);
+ rtl8125_phy_param(phydev, 0x8fe4, 0xffff, 0x2114);
+
+ rtl8125_phy_param(phydev, 0x8647, 0xffff, 0xa7b1);
+ rtl8125_phy_param(phydev, 0x8649, 0xffff, 0xbbca);
+ rtl8125_phy_param(phydev, 0x864b, 0xff00, 0xdc00);
+
+ rtl8125_phy_param(phydev, 0x8154, 0xc000, 0x4000);
+ rtl8125_phy_param(phydev, 0x8158, 0xc000, 0x0000);
+
+ rtl8125_phy_param(phydev, 0x826c, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x826e, 0xffff, 0xffff);
+
+ rtl8125_phy_param(phydev, 0x8872, 0xff00, 0x0e00);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x0800);
+ r8168g_phy_param(phydev, 0x8012, 0x0000, 0x4000);
+ phy_modify_paged(phydev, 0x0b57, 0x13, 0x0000, 0x0001);
+ r8168g_phy_param(phydev, 0x834a, 0xff00, 0x0700);
+ rtl8125_phy_param(phydev, 0x8217, 0x3f00, 0x2a00);
+ r8168g_phy_param(phydev, 0x81b1, 0xff00, 0x0b00);
+ rtl8125_phy_param(phydev, 0x8fed, 0xff00, 0x4e00);
+
+ rtl8125_phy_param(phydev, 0x88ac, 0xff00, 0x2300);
+ phy_modify_paged(phydev, 0x0bf0, 0x16, 0x0000, 0x3800);
+ rtl8125_phy_param(phydev, 0x88de, 0xff00, 0x0000);
+ rtl8125_phy_param(phydev, 0x80b4, 0xffff, 0x5195);
+
+ r8168g_phy_param(phydev, 0x8370, 0xffff, 0x8671);
+ r8168g_phy_param(phydev, 0x8372, 0xffff, 0x86c8);
+
+ r8168g_phy_param(phydev, 0x8401, 0xffff, 0x86c8);
+ r8168g_phy_param(phydev, 0x8403, 0xffff, 0x86da);
+ r8168g_phy_param(phydev, 0x8406, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8408, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840a, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840c, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x840e, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8410, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8412, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8414, 0x1800, 0x1000);
+ r8168g_phy_param(phydev, 0x8416, 0x1800, 0x1000);
+
+ r8168g_phy_param(phydev, 0x82bd, 0xffff, 0x1f40);
+
+ phy_modify_paged(phydev, 0x0bfb, 0x12, 0x07ff, 0x0328);
+ phy_write_paged(phydev, 0x0bfb, 0x13, 0x3e14);
+
+ r8168g_phy_param(phydev, 0x81c4, 0xffff, 0x003b);
+ r8168g_phy_param(phydev, 0x81c6, 0xffff, 0x0086);
+ r8168g_phy_param(phydev, 0x81c8, 0xffff, 0x00b7);
+ r8168g_phy_param(phydev, 0x81ca, 0xffff, 0x00db);
+ r8168g_phy_param(phydev, 0x81cc, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81ce, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d0, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d2, 0xffff, 0x00fe);
+ r8168g_phy_param(phydev, 0x81d4, 0xffff, 0x00c3);
+ r8168g_phy_param(phydev, 0x81d6, 0xffff, 0x0078);
+ r8168g_phy_param(phydev, 0x81d8, 0xffff, 0x0047);
+ r8168g_phy_param(phydev, 0x81da, 0xffff, 0x0023);
+
+ rtl8125_phy_param(phydev, 0x88d7, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x88d9, 0xffff, 0x01a0);
+ rtl8125_phy_param(phydev, 0x8ffa, 0xffff, 0x002a);
+
+ rtl8125_phy_param(phydev, 0x8fee, 0xffff, 0xffdf);
+ rtl8125_phy_param(phydev, 0x8ff0, 0xffff, 0xffff);
+ rtl8125_phy_param(phydev, 0x8ff2, 0xffff, 0x0a4a);
+ rtl8125_phy_param(phydev, 0x8ff4, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x8ff6, 0xffff, 0x0a4a);
+
+ rtl8125_phy_param(phydev, 0x8ff8, 0xffff, 0xaa5a);
+ rtl8125_phy_param(phydev, 0x88d5, 0xff00, 0x0200);
+
+ r8168g_phy_param(phydev, 0x84bb, 0xff00, 0x0a00);
+ r8168g_phy_param(phydev, 0x84c0, 0xff00, 0x1600);
+
+ phy_modify_paged(phydev, 0x0a43, 0x10, 0x0000, 0x0003);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_common_config_eee_phy(phydev);
+}
+
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver)
{
@@ -1180,14 +1341,12 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
- [RTL_GIGA_MAC_VER_65] = rtl8125d_hw_phy_config,
[RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config,
[RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config,
- [RTL_GIGA_MAC_VER_71] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_80] = rtl8127a_1_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 2bbfcad613ab..498cfe4d0cac 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -170,6 +170,7 @@ enum rtase_registers {
#define RTASE_TC_MODE_MASK GENMASK(11, 10)
RTASE_TOKSEL = 0x2046,
+ RTASE_TXQCRDT_0 = 0x2500,
RTASE_RFIFONFULL = 0x4406,
RTASE_INT_MITI_TX = 0x0A00,
RTASE_INT_MITI_RX = 0x0A80,
@@ -259,6 +260,12 @@ union rtase_rx_desc {
#define RTASE_VLAN_TAG_MASK GENMASK(15, 0)
#define RTASE_RX_PKT_SIZE_MASK GENMASK(13, 0)
+/* txqos hardware definitions */
+#define RTASE_1T_CLOCK 64
+#define RTASE_1T_POWER 10000000
+#define RTASE_IDLESLOPE_INT_SHIFT 25
+#define RTASE_IDLESLOPE_INT_MASK GENMASK(31, 25)
+
#define RTASE_IVEC_NAME_SIZE (IFNAMSIZ + 10)
struct rtase_int_vector {
@@ -294,6 +301,13 @@ struct rtase_ring {
u64 alloc_fail;
};
+struct rtase_txqos {
+ int hicredit;
+ int locredit;
+ int idleslope;
+ int sendslope;
+};
+
struct rtase_stats {
u64 tx_dropped;
u64 rx_dropped;
@@ -313,6 +327,7 @@ struct rtase_private {
struct page_pool *page_pool;
struct rtase_ring tx_ring[RTASE_NUM_TX_QUEUE];
+ struct rtase_txqos tx_qos[RTASE_NUM_TX_QUEUE];
struct rtase_ring rx_ring[RTASE_NUM_RX_QUEUE];
struct rtase_counters *tally_vaddr;
dma_addr_t tally_paddr;
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 55b8d3666153..4d37217e9a14 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1114,7 +1114,7 @@ static int rtase_open(struct net_device *dev)
/* request other interrupts to handle multiqueue */
for (i = 1; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
- snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
+ snprintf(ivec->name, sizeof(ivec->name), "%s_int%u",
tp->dev->name, i);
ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
ivec->name, ivec);
@@ -1661,6 +1661,65 @@ static void rtase_get_stats64(struct net_device *dev,
stats->rx_length_errors = tp->stats.rx_length_errors;
}
+static void rtase_set_hw_cbs(const struct rtase_private *tp, u32 queue)
+{
+ u32 idle = tp->tx_qos[queue].idleslope * RTASE_1T_CLOCK;
+ u32 val, i;
+
+ val = u32_encode_bits(idle / RTASE_1T_POWER, RTASE_IDLESLOPE_INT_MASK);
+ idle %= RTASE_1T_POWER;
+
+ for (i = 1; i <= RTASE_IDLESLOPE_INT_SHIFT; i++) {
+ idle *= 2;
+ if ((idle / RTASE_1T_POWER) == 1)
+ val |= BIT(RTASE_IDLESLOPE_INT_SHIFT - i);
+
+ idle %= RTASE_1T_POWER;
+ }
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, val);
+}
+
+static int rtase_setup_tc_cbs(struct rtase_private *tp,
+ const struct tc_cbs_qopt_offload *qopt)
+{
+ int queue = qopt->queue;
+
+ if (queue < 0 || queue >= tp->func_tx_queue_num)
+ return -EINVAL;
+
+ if (!qopt->enable) {
+ tp->tx_qos[queue].hicredit = 0;
+ tp->tx_qos[queue].locredit = 0;
+ tp->tx_qos[queue].idleslope = 0;
+ tp->tx_qos[queue].sendslope = 0;
+
+ rtase_w32(tp, RTASE_TXQCRDT_0 + queue * 4, 0);
+ } else {
+ tp->tx_qos[queue].hicredit = qopt->hicredit;
+ tp->tx_qos[queue].locredit = qopt->locredit;
+ tp->tx_qos[queue].idleslope = qopt->idleslope;
+ tp->tx_qos[queue].sendslope = qopt->sendslope;
+
+ rtase_set_hw_cbs(tp, queue);
+ }
+
+ return 0;
+}
+
+static int rtase_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct rtase_private *tp = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return rtase_setup_tc_cbs(tp, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static netdev_features_t rtase_fix_features(struct net_device *dev,
netdev_features_t features)
{
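rtase_set_hw_cbs() converts the CBS idle slope into the hardware's fixed-point credit format: the integer part of idleslope * 64 / 10^7 fills bits 31:25, and the loop then performs long division in binary on the remainder, doubling it each round and setting the next lower bit whenever it crosses 10^7. A standalone sketch of that conversion with the same constants (assuming idleslope * 64 fits in 32 bits, as the register layout implies):

#include <stdint.h>
#include <stdio.h>

#define CLK		64		/* RTASE_1T_CLOCK */
#define POWER		10000000u	/* RTASE_1T_POWER */
#define INT_SHIFT	25		/* RTASE_IDLESLOPE_INT_SHIFT */

/* Encode idleslope * CLK / POWER as a 7.25 fixed-point value. */
static uint32_t encode_idleslope(uint32_t idleslope)
{
	uint32_t idle = idleslope * CLK;
	uint32_t val = (idle / POWER) << INT_SHIFT;	/* integer part */
	unsigned int i;

	idle %= POWER;
	for (i = 1; i <= INT_SHIFT; i++) {
		idle *= 2;			/* next binary digit */
		if (idle / POWER)		/* crossed one whole unit? */
			val |= 1u << (INT_SHIFT - i);
		idle %= POWER;
	}
	return val;
}

int main(void)
{
	/* e.g. a 98 Mbit/s idle slope (tc-cbs passes kbit/s) */
	printf("credit word: 0x%08x\n", encode_idleslope(98000));
	return 0;
}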
@@ -1696,6 +1755,7 @@ static const struct net_device_ops rtase_netdev_ops = {
.ndo_change_mtu = rtase_change_mtu,
.ndo_tx_timeout = rtase_tx_timeout,
.ndo_get_stats64 = rtase_get_stats64,
+ .ndo_setup_tc = rtase_setup_tc,
.ndo_fix_features = rtase_fix_features,
.ndo_set_features = rtase_set_features,
};
@@ -1923,7 +1983,7 @@ static u16 rtase_calc_time_mitigation(u32 time_us)
u8 msb, time_count, time_unit;
u16 int_miti;
- time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
+ time_us = min(time_us, RTASE_MITI_MAX_TIME);
if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
msb = fls(time_us);
@@ -1945,7 +2005,7 @@ static u16 rtase_calc_packet_num_mitigation(u16 pkt_num)
u8 msb, pkt_num_count, pkt_num_unit;
u16 int_miti;
- pkt_num = min_t(int, pkt_num, RTASE_MITI_MAX_PKT_NUM);
+ pkt_num = min(pkt_num, RTASE_MITI_MAX_PKT_NUM);
if (pkt_num > 60) {
pkt_num_unit = RTASE_MITI_MAX_PKT_NUM_IDX;
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index b4365906669f..226c6c0ab945 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -176,12 +176,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
struct net_device *ndev = priv->ndev;
unsigned long flags;
- /* Reject requests with unsupported flags */
- if (req->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE))
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -212,10 +206,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
unsigned long flags;
int error = 0;
- /* Reject requests with unsupported flags */
- if (req->flags)
- return -EOPNOTSUPP;
-
if (req->index)
return -EINVAL;
@@ -287,6 +277,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
.max_adj = 50000000,
.n_ext_ts = N_EXT_TS,
.n_per_out = N_PER_OUT,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE,
.adjfine = ravb_ptp_adjfine,
.adjtime = ravb_ptp_adjtime,
.gettime64 = ravb_ptp_gettime64,
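Declaring .supported_extts_flags lets the PTP core screen external-timestamp requests before ravb_ptp_extts() runs, which is why the open-coded flag checks above could be dropped; leaving the perout flag set unadvertised likewise makes the core refuse any perout flags, matching the removed check. A hedged in-kernel sketch of how a driver advertises the set (device name and counts are hypothetical):

#include <linux/ptp_clock_kernel.h>

static const struct ptp_clock_info example_ptp_info = {
	.owner			= THIS_MODULE,
	.name			= "example-ptp",
	.n_ext_ts		= 1,
	/* the core rejects EXTTS requests using any other flags */
	.supported_extts_flags	= PTP_RISING_EDGE | PTP_FALLING_EDGE,
	/* .enable, .gettime64 etc. omitted for brevity */
};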
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index d5db26103d82..19985d13e243 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1933,7 +1933,7 @@ static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
- if (!found != !removing) {
+ if (!found == removing) {
kfree(fdb);
if (!found && removing)
return 0;
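The rocker change relies on a boolean identity: since 'removing' is a bool and '!' normalizes the 'found' pointer, a != !b is the same as a == b, so '!found != !removing' and '!found == removing' always agree. The equivalence, checked over all four combinations:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	void *ptrs[] = { 0, (void *)1 };	/* found: NULL or not */

	for (int f = 0; f < 2; f++) {
		for (int r = 0; r < 2; r++) {
			void *found = ptrs[f];
			bool removing = r;

			/* old form and new form never disagree */
			assert((!found != !removing) ==
			       (!found == removing));
		}
	}
	return 0;
}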
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 332cbd725900..df869f82cae8 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
/* We do a request_region() to register /proc/ioports info. */
- ret = pci_request_regions(pci_dev, "sis900");
+ ret = pcim_request_all_regions(pci_dev, "sis900");
if (ret)
goto err_out;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 3c820ef56775..67fa879b1e52 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,6 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on ETHTOOL_NETLINK
select MII
select PCS_XPCS
select PAGE_POOL
@@ -131,6 +132,17 @@ config DWMAC_QCOM_ETHQOS
This selects the Qualcomm ETHQOS glue layer support for the
stmmac device driver.
+config DWMAC_RENESAS_GBETH
+ tristate "Renesas RZ/V2H(P) GBETH support"
+ default ARCH_RENESAS
+ depends on OF && (ARCH_RENESAS || COMPILE_TEST)
+ help
+ Support for Gigabit Ethernet Interface (GBETH) on Renesas
+ RZ/V2H(P) SoCs.
+
This selects the Renesas RZ/V2H(P) SoC specific glue layer support
+ for the stmmac device driver.
+
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 594883fb4164..b591d93f8503 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o stmmac_est.o stmmac_fpe.o \
+ stmmac_xdp.o stmmac_est.o stmmac_fpe.o stmmac_vlan.o \
$(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
@@ -20,6 +20,7 @@ obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
+obj-$(CONFIG_DWMAC_RENESAS_GBETH) += dwmac-renesas-gbeth.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 412b07e77945..ea5da5793362 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -602,6 +602,7 @@ struct mac_device_info {
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
const struct stmmac_est_ops *est;
+ const struct stmmac_vlan_ops *vlan;
struct dw_xpcs *xpcs;
struct phylink_pcs *phylink_pcs;
struct mii_regs mii; /* MII register Addresses */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 37fe7c288878..84072c8ed741 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -65,13 +65,12 @@ anarion_config_dt(struct platform_device *pdev,
{
struct anarion_gmac *gmac;
void __iomem *ctl_block;
- int err;
ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
- err = PTR_ERR(ctl_block);
- dev_err(&pdev->dev, "Cannot get reset region (%d)!\n", err);
- return ERR_PTR(err);
+ dev_err(&pdev->dev, "Cannot get reset region (%pe)!\n",
+ ctl_block);
+ return ERR_CAST(ctl_block);
}
gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
@@ -80,17 +79,11 @@ anarion_config_dt(struct platform_device *pdev,
gmac->ctl_block = ctl_block;
- switch (plat_dat->phy_interface) {
- case PHY_INTERFACE_MODE_RGMII:
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(plat_dat->phy_interface)) {
gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII;
- break;
- default:
- dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
- plat_dat->phy_interface);
+ } else {
+ dev_err(&pdev->dev, "Unsupported phy-mode (%s)\n",
+ phy_modes(plat_dat->phy_interface));
return ERR_PTR(-ENOTSUPP);
}
@@ -118,10 +111,9 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
plat_dat->init = anarion_gmac_init;
plat_dat->exit = anarion_gmac_exit;
- anarion_gmac_init(pdev, gmac);
plat_dat->bsp_priv = gmac;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id anarion_dwmac_match[] = {
@@ -132,7 +124,6 @@ MODULE_DEVICE_TABLE(of, anarion_dwmac_match);
static struct platform_driver anarion_dwmac_driver = {
.probe = anarion_dwmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "anarion-dwmac",
.pm = &stmmac_pltfr_pm_ops,
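Two idioms land in the anarion conversion: the %pe format prints the errno encoded in an ERR_PTR symbolically (e.g. -EBUSY), and ERR_CAST() forwards that encoded error as another pointer type without a temporary int. A short in-kernel-style sketch (the index and message mirror the hunk; the helper itself is hypothetical):

#include <linux/err.h>
#include <linux/platform_device.h>

static void __iomem *map_ctl_block(struct platform_device *pdev)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 1);

	if (IS_ERR(base))
		/* %pe decodes the ERR_PTR payload, e.g. "-EBUSY" */
		dev_err(&pdev->dev, "Cannot get reset region (%pe)!\n",
			base);

	/* a caller returning a different pointer type would use
	 * ERR_CAST(base) instead of PTR_ERR() + ERR_PTR() */
	return base;
}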
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index cd431f84f34f..09ae16e026eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -29,21 +29,10 @@ struct tegra_eqos {
void __iomem *regs;
struct reset_control *rst;
- struct clk *clk_slave;
struct gpio_desc *reset;
};
-static struct clk *dwc_eth_find_clk(struct plat_stmmacenet_data *plat_dat,
- const char *name)
-{
- for (int i = 0; i < plat_dat->num_clks; i++)
- if (strcmp(plat_dat->clks[i].id, name) == 0)
- return plat_dat->clks[i].clk;
-
- return NULL;
-}
-
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
{
@@ -132,7 +121,7 @@ static int dwc_qos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
{
- plat_dat->pclk = dwc_eth_find_clk(plat_dat, "phy_ref_clk");
+ plat_dat->pclk = stmmac_pltfr_find_clk(plat_dat, "phy_ref_clk");
return 0;
}
@@ -147,10 +136,11 @@ static int dwc_qos_probe(struct platform_device *pdev,
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
+static void tegra_eqos_fix_speed(void *bsp_priv, int speed, unsigned int mode)
{
- struct tegra_eqos *eqos = priv;
+ struct tegra_eqos *eqos = bsp_priv;
bool needs_calibration = false;
+ struct stmmac_priv *priv;
u32 value;
int err;
@@ -169,6 +159,11 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
}
if (needs_calibration) {
+ priv = netdev_priv(dev_get_drvdata(eqos->dev));
+
+ /* Calibration should be done with the MDIO bus idle */
+ mutex_lock(&priv->mii->mdio_lock);
+
/* calibrate */
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
@@ -202,6 +197,8 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
value = readl(eqos->regs + SDMEMCOMPPADCTRL);
value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ mutex_unlock(&priv->mii->mdio_lock);
} else {
value = readl(eqos->regs + AUTO_CAL_CONFIG);
value &= ~AUTO_CAL_CONFIG_ENABLE;
@@ -209,20 +206,6 @@ static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
}
}
-static int tegra_eqos_init(struct platform_device *pdev, void *priv)
-{
- struct tegra_eqos *eqos = priv;
- unsigned long rate;
- u32 value;
-
- rate = clk_get_rate(eqos->clk_slave);
-
- value = (rate / 1000000) - 1;
- writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
-
- return 0;
-}
-
static int tegra_eqos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
@@ -237,12 +220,11 @@ static int tegra_eqos_probe(struct platform_device *pdev,
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
- eqos->clk_slave = plat_dat->stmmac_clk;
if (!is_of_node(dev->fwnode))
goto bypass_clk_reset_gpio;
- plat_dat->clk_tx_i = dwc_eth_find_clk(plat_dat, "tx");
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
if (IS_ERR(eqos->reset)) {
@@ -277,17 +259,12 @@ static int tegra_eqos_probe(struct platform_device *pdev,
bypass_clk_reset_gpio:
plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
- plat_dat->init = tegra_eqos_init;
plat_dat->bsp_priv = eqos;
- plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
-
- err = tegra_eqos_init(pdev, eqos);
- if (err < 0)
- goto reset;
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
return 0;
-reset:
- reset_control_assert(eqos->rst);
+
reset_phy:
gpiod_set_value(eqos->reset, 1);
@@ -362,8 +339,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
- plat_dat->stmmac_clk = dwc_eth_find_clk(plat_dat,
- data->stmmac_clk_name);
+ plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
+ data->stmmac_clk_name);
if (data->probe)
ret = data->probe(pdev, plat_dat, &stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 5d279fa54b3e..889e2bb6f7f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -379,10 +379,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = imx_dwmac_init(pdev, dwmac);
- if (ret)
- goto err_dwmac_init;
-
if (dwmac->ops->fix_mac_speed) {
plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
} else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) {
@@ -392,16 +388,10 @@ static int imx_dwmac_probe(struct platform_device *pdev)
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
- goto err_drv_probe;
-
- return 0;
+ imx_dwmac_clks_config(dwmac, false);
-err_drv_probe:
- imx_dwmac_exit(pdev, plat_dat->bsp_priv);
-err_dwmac_init:
- imx_dwmac_clks_config(dwmac, false);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 066783d66422..15abe214131f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -56,6 +56,7 @@ enum ingenic_mac_version {
struct ingenic_mac {
const struct ingenic_soc_info *soc_info;
+ struct plat_stmmacenet_data *plat_dat;
struct device *dev;
struct regmap *regmap;
@@ -70,13 +71,13 @@ struct ingenic_soc_info {
int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
};
-static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat)
+static int ingenic_mac_init(struct platform_device *pdev, void *bsp_priv)
{
- struct ingenic_mac *mac = plat_dat->bsp_priv;
+ struct ingenic_mac *mac = bsp_priv;
int ret;
if (mac->soc_info->set_mode) {
- ret = mac->soc_info->set_mode(plat_dat);
+ ret = mac->soc_info->set_mode(mac->plat_dat);
if (ret)
return ret;
}
@@ -284,44 +285,14 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->soc_info = data;
mac->dev = &pdev->dev;
+ mac->plat_dat = plat_dat;
plat_dat->bsp_priv = mac;
+ plat_dat->init = ingenic_mac_init;
- ret = ingenic_mac_init(plat_dat);
- if (ret)
- return ret;
-
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ingenic_mac_suspend(struct device *dev)
-{
- int ret;
-
- ret = stmmac_suspend(dev);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int ingenic_mac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret;
-
- ret = ingenic_mac_init(priv->plat);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume);
-
static struct ingenic_soc_info jz4775_soc_info = {
.version = ID_JZ4775,
.mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
@@ -370,10 +341,9 @@ MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
static struct platform_driver ingenic_mac_driver = {
.probe = ingenic_mac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "ingenic-mac",
- .pm = pm_ptr(&ingenic_mac_pm_ops),
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = ingenic_mac_of_matches,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index 599def7b3a64..4ea7b0a803d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -113,16 +113,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
plat_dat->clk_tx_i = dwmac->tx_clk;
plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
-
plat_dat->bsp_priv = dwmac;
- plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
-
- if (plat_dat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER);
- }
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index c8bb9265bbb4..9a47015254bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -284,28 +284,28 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
}
-static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+static void tgl_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
{
- struct intel_priv_data *intel_priv = intel_data;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int serdes_phy_addr = 0;
- u32 data = 0;
-
- serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+ struct intel_priv_data *intel_priv = bsp_priv;
+ phy_interface_t interface;
+ int data;
/* Determine the link speed mode: 2.5Gbps/1Gbps */
- data = mdiobus_read(priv->mii, serdes_phy_addr,
- SERDES_GCR);
+ data = mdiobus_read(priv->mii, intel_priv->mdio_adhoc_addr, SERDES_GCR);
+ if (data < 0)
+ return;
- if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
- SERDES_LINK_MODE_2G5) {
+ if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) {
dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
priv->plat->mdio_bus_data->default_an_inband = false;
+ interface = PHY_INTERFACE_MODE_2500BASEX;
} else {
- priv->plat->max_speed = 1000;
+ interface = PHY_INTERFACE_MODE_SGMII;
}
+
+ __set_bit(interface, interfaces);
+ priv->plat->phy_interface = interface;
}
/* Program PTP Clock Frequency for different variant of
@@ -682,7 +682,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_blen[2] = 16;
plat->ptp_max_adj = plat->clk_ptp_rate;
- plat->eee_usecs_rate = plat->clk_ptp_rate;
/* Set system clock */
sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
@@ -933,7 +932,7 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->rx_queues_to_use = 6;
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 204800000;
- plat->speed_mode_2500 = intel_speed_mode_2500;
+ plat->get_interfaces = tgl_get_interfaces;
plat->safety_feat_cfg->tsoee = 1;
plat->safety_feat_cfg->mrxpee = 0;
@@ -952,7 +951,6 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -966,7 +964,6 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -980,7 +977,6 @@ static int adls_sgmii_phy0_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -995,7 +991,6 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
/* SerDes power up and power down are done in BIOS for ADL */
@@ -1313,13 +1308,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- if (plat->eee_usecs_rate > 0) {
- u32 tx_lpi_usec;
-
- tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
- writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
- }
-
ret = stmmac_config_multi_msi(pdev, plat, &res);
if (ret) {
ret = stmmac_config_single_msi(pdev, plat, &res);
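
Two things change in tgl_get_interfaces() beyond the rename: a negative (failed) mdiobus_read() return is now caught instead of being folded into the register value, and the open-coded mask-and-shift becomes FIELD_GET(), which derives the shift from the mask at compile time — hence the removal of SERDES_LINK_MODE_SHIFT from dwmac-intel.h just below. A self-contained illustration of the equivalence (the value of SERDES_LINK_MODE_2G5 is an assumption for the demo):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define SERDES_LINK_MODE_MASK   GENMASK(2, 1)
#define SERDES_LINK_MODE_2G5    0x3     /* assumed value, illustration only */

static bool serdes_link_is_2g5(u32 data)
{
        /* FIELD_GET(GENMASK(2, 1), x) == (x & GENMASK(2, 1)) >> 1 */
        return FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5;
}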
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index a12f8e65f89f..7511c224b312 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -21,7 +21,6 @@
#define SERDES_RATE_MASK GENMASK(9, 8)
#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */
#define SERDES_LINK_MODE_MASK GENMASK(2, 1)
-#define SERDES_LINK_MODE_SHIFT 1
#define SERDES_PWR_ST_SHIFT 4
#define SERDES_PWR_ST_P0 0x0
#define SERDES_PWR_ST_P3 0x3
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 1a93787056a7..e1591e6217d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -66,12 +66,14 @@
DMA_STATUS_TPS | DMA_STATUS_TI | \
DMA_STATUS_MSK_COMMON_LOONGSON)
-#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC1 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GMAC2 0x7a23
#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13
-#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */
-#define CHANNEL_NUM 8
+#define DWMAC_CORE_MULTICHAN_V1 0x10 /* Loongson custom ID 0x10 */
+#define DWMAC_CORE_MULTICHAN_V2 0x12 /* Loongson custom ID 0x12 */
struct loongson_data {
+ u32 multichan;
u32 loongson_id;
struct device *dev;
};
@@ -83,6 +85,8 @@ struct stmmac_pci_info {
static void loongson_default_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct loongson_data *ld = plat->bsp_priv;
+
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
@@ -116,31 +120,37 @@ static void loongson_default_data(struct pci_dev *pdev,
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
-}
-
-static int loongson_gmac_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- struct loongson_data *ld;
- int i;
-
- ld = plat->bsp_priv;
- loongson_default_data(pdev, plat);
-
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
- plat->rx_queues_to_use = CHANNEL_NUM;
- plat->tx_queues_to_use = CHANNEL_NUM;
+ switch (ld->loongson_id) {
+ case DWMAC_CORE_MULTICHAN_V1:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
/* Only channel 0 supports checksum,
* so turn off checksum to enable multiple channels.
*/
- for (i = 1; i < CHANNEL_NUM; i++)
+ for (int i = 1; i < 8; i++)
plat->tx_queues_cfg[i].coe_unsupported = 1;
- } else {
+
+ break;
+ case DWMAC_CORE_MULTICHAN_V2:
+ ld->multichan = 1;
+ plat->rx_queues_to_use = 4;
+ plat->tx_queues_to_use = 4;
+ break;
+ default:
+ ld->multichan = 0;
plat->tx_queues_to_use = 1;
plat->rx_queues_to_use = 1;
+ break;
}
+}
+
+static int loongson_gmac_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ loongson_default_data(pdev, plat);
plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
@@ -172,27 +182,8 @@ static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode)
static int loongson_gnet_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
- struct loongson_data *ld;
- int i;
-
- ld = plat->bsp_priv;
-
loongson_default_data(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
- plat->rx_queues_to_use = CHANNEL_NUM;
- plat->tx_queues_to_use = CHANNEL_NUM;
-
- /* Only channel 0 supports checksum,
- * so turn off checksum to enable multiple channels.
- */
- for (i = 1; i < CHANNEL_NUM; i++)
- plat->tx_queues_cfg[i].coe_unsupported = 1;
- } else {
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
- }
-
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
plat->fix_mac_speed = loongson_gnet_fix_speed;
@@ -350,14 +341,14 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
return NULL;
/* The Loongson GMAC and GNET devices are based on the DW GMAC
- * v3.50a and v3.73a IP-cores. But the HW designers have changed the
- * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the
- * network controllers with the multi-channels feature
+ * v3.50a and v3.73a IP-cores. But the HW designers have changed
+ * the GMAC_VERSION.SNPSVER field to the custom 0x10/0x12 value
+ * on the network controllers with the multi-channel feature
* available to emphasize the differences: multiple DMA-channels,
* AV feature and GMAC_INT_STATUS CSR flags layout. Get back the
* original value so the correct HW-interface would be selected.
*/
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ if (ld->multichan) {
priv->synopsys_id = DWMAC_CORE_3_70;
*dma = dwmac1000_dma_ops;
dma->init_chan = loongson_dwmac_dma_init_channel;
@@ -378,13 +369,13 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
- /* Loongson GMAC doesn't support the flow control. LS2K2000
- * GNET doesn't support the half-duplex link mode.
+ /* Loongson GMAC doesn't support flow control. Loongson GNET
+ * without multi-channel doesn't support half-duplex link mode.
*/
- if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
+ if (pdev->device != PCI_DEVICE_ID_LOONGSON_GNET) {
mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
} else {
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000;
else
@@ -413,9 +404,11 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
- int i, ret, vecs;
+ int i, ch_num, ret, vecs;
+
+ ch_num = min(plat->tx_queues_to_use, plat->rx_queues_to_use);
- vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
+ vecs = roundup_pow_of_two(ch_num * 2 + 1);
ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
if (ret < 0) {
dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
@@ -424,14 +417,12 @@ static int loongson_dwmac_msi_config(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, 0);
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- res->rx_irq[CHANNEL_NUM - 1 - i] =
- pci_irq_vector(pdev, 1 + i * 2);
+ for (i = 0; i < ch_num; i++) {
+ res->rx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 1 + i * 2);
}
- for (i = 0; i < plat->tx_queues_to_use; i++) {
- res->tx_irq[CHANNEL_NUM - 1 - i] =
- pci_irq_vector(pdev, 2 + i * 2);
+ for (i = 0; i < ch_num; i++) {
+ res->tx_irq[ch_num - 1 - i] = pci_irq_vector(pdev, 2 + i * 2);
}
plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
@@ -593,7 +584,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
goto err_disable_device;
/* Use the common MAC IRQ if per-channel MSIs allocation failed */
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_config(pdev, plat, &res);
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
@@ -605,7 +596,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
err_plat_clear:
if (dev_of_node(&pdev->dev))
loongson_dwmac_dt_clear(pdev, plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_clear(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -624,7 +615,7 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
if (dev_of_node(&pdev->dev))
loongson_dwmac_dt_clear(pdev, priv->plat);
- if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ if (ld->multichan)
loongson_dwmac_msi_clear(pdev);
pci_disable_device(pdev);
@@ -669,7 +660,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
loongson_dwmac_resume);
static const struct pci_device_id loongson_dwmac_id_table[] = {
- { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC1, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC2, &loongson_gmac_pci_info) },
{ PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
{}
};
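
The MSI rework sizes the vector table from the live queue counts instead of the fixed CHANNEL_NUM: the 4-channel MULTICHAN_V2 parts ask for roundup_pow_of_two(4 * 2 + 1) = 16 vectors, the 8-channel V1 parts for roundup_pow_of_two(17) = 32. The old loops bounded on rx/tx_queues_to_use while indexing with CHANNEL_NUM, which only lined up when both were 8 and would have broken on V2. Vector 0 remains the common MAC IRQ, odd vectors carry Rx and even vectors Tx, filled in reverse as in the loop above (the diff itself does not say why the order is inverted). A sketch of the resulting map:

static void demo_vector_map(int ch_num, int *rx_map, int *tx_map)
{
        for (int i = 0; i < ch_num; i++) {
                /* for ch_num = 4: rx_map[3] = 1, rx_map[2] = 3, ... */
                rx_map[ch_num - 1 - i] = 1 + i * 2;
                /* ... and tx_map[3] = 2, tx_map[2] = 4, ... */
                tx_map[ch_num - 1 - i] = 2 + i * 2;
        }
}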
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index d178d5ddc7c7..39421d6a34e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -581,7 +581,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
int i;
priv_plat->phy_mode = plat->phy_interface;
- plat->mac_interface = priv_plat->phy_mode;
if (priv_plat->mac_wol)
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 0e4da216f942..e30bdf72331a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -106,12 +106,11 @@ struct qcom_ethqos {
struct platform_device *pdev;
void __iomem *rgmii_base;
void __iomem *mac_base;
- int (*configure_func)(struct qcom_ethqos *ethqos);
+ int (*configure_func)(struct qcom_ethqos *ethqos, int speed);
unsigned int link_clk_rate;
struct clk *link_clk;
struct phy *serdes_phy;
- int speed;
int serdes_speed;
phy_interface_t phy_mode;
@@ -385,7 +384,7 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
return 0;
}
-static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
+static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
int phase_shift;
@@ -412,7 +411,7 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
0, RGMII_IO_MACRO_CONFIG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_1000:
rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
@@ -532,14 +531,14 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
loopback, RGMII_IO_MACRO_CONFIG);
break;
default:
- dev_err(dev, "Invalid speed %d\n", ethqos->speed);
+ dev_err(dev, "Invalid speed %d\n", speed);
return -EINVAL;
}
return 0;
}
-static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
volatile unsigned int dll_lock;
@@ -562,7 +561,7 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
if (ethqos->has_emac_ge_3) {
- if (ethqos->speed == SPEED_1000) {
+ if (speed == SPEED_1000) {
rgmii_writel(ethqos, 0x1800000, SDCC_TEST_CTL);
rgmii_writel(ethqos, 0x2C010800, SDCC_USR_CTL);
rgmii_writel(ethqos, 0xA001, SDCC_HC_REG_DLL_CONFIG2);
@@ -580,7 +579,7 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
SDCC_HC_REG_DLL_CONFIG);
- if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
+ if (speed != SPEED_100 && speed != SPEED_10) {
/* Set DLL_EN */
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
@@ -607,10 +606,10 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
dev_err(dev, "Timeout while waiting for DLL lock\n");
}
- if (ethqos->speed == SPEED_1000)
+ if (speed == SPEED_1000)
ethqos_dll_configure(ethqos);
- ethqos_rgmii_macro_init(ethqos);
+ ethqos_rgmii_macro_init(ethqos, speed);
return 0;
}
@@ -626,7 +625,7 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
/* On interface toggle, MAC registers get reset.
* Configure MAC block for SGMII on ethernet phy link up
*/
-static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
+static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
{
struct net_device *dev = platform_get_drvdata(ethqos->pdev);
struct stmmac_priv *priv = netdev_priv(dev);
@@ -634,7 +633,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
val = readl(ethqos->mac_base + MAC_CTRL_REG);
- switch (ethqos->speed) {
+ switch (speed) {
case SPEED_2500:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
@@ -673,17 +672,9 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
return val;
}
-static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data)
+static int ethqos_configure(struct qcom_ethqos *ethqos, int speed)
{
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- priv->plat->max_speed = 2500;
- priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
-}
-
-static int ethqos_configure(struct qcom_ethqos *ethqos)
-{
- return ethqos->configure_func(ethqos);
+ return ethqos->configure_func(ethqos, speed);
}
static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
@@ -691,9 +682,8 @@ static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
struct qcom_ethqos *ethqos = priv;
qcom_ethqos_set_sgmii_loopback(ethqos, false);
- ethqos->speed = speed;
ethqos_update_link_clk(ethqos, speed);
- ethqos_configure(ethqos);
+ ethqos_configure(ethqos, speed);
}
static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
@@ -709,7 +699,7 @@ static int qcom_ethqos_serdes_powerup(struct net_device *ndev, void *priv)
if (ret)
return ret;
- return phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
}
static void qcom_ethqos_serdes_powerdown(struct net_device *ndev, void *priv)
@@ -803,8 +793,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->configure_func = ethqos_configure_rgmii;
break;
case PHY_INTERFACE_MODE_2500BASEX:
- plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500;
- fallthrough;
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
@@ -847,7 +835,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(ethqos->serdes_phy),
"Failed to get serdes phy\n");
- ethqos->speed = SPEED_1000;
ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
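
Two behavioural points in the ethqos hunks: the negotiated speed is threaded through configure_func() as an argument instead of being cached in struct qcom_ethqos, and serdes power-up now restores ethqos->serdes_speed — the lane rate last programmed via ethqos_set_serdes_speed() — rather than the MAC link speed, which need not match the lane rate on SGMII. An illustrative reduction of the corrected call (hypothetical helper; the phy init/power sequence of qcom_ethqos_serdes_powerup() is elided):

static int demo_restore_serdes_rate(struct qcom_ethqos *ethqos)
{
        return phy_set_speed(ethqos->serdes_phy, ethqos->serdes_speed);
}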
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
new file mode 100644
index 000000000000..9a774046455b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dwmac-renesas-gbeth.c - DWMAC Specific Glue layer for Renesas GBETH
+ *
+ * The Rx and Tx clocks are supplied as follows for the GBETH IP.
+ *
+ * Rx / Tx
+ * -------+------------- on / off -------
+ * |
+ * | Rx-180 / Tx-180
+ * +---- not ---- on / off -------
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "stmmac_platform.h"
+
+struct renesas_gbeth {
+ struct plat_stmmacenet_data *plat_dat;
+ struct reset_control *rstc;
+ struct device *dev;
+};
+
+static const char *const renesas_gbeth_clks[] = {
+ "tx", "tx-180", "rx", "rx-180",
+};
+
+static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ ret = reset_control_deassert(gbeth->rstc);
+ if (ret) {
+ dev_err(gbeth->dev, "Reset deassert failed\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks,
+ plat_dat->clks);
+ if (ret)
+ reset_control_assert(gbeth->rstc);
+
+ return ret;
+}
+
+static void renesas_gbeth_exit(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct renesas_gbeth *gbeth = priv;
+ int ret;
+
+ plat_dat = gbeth->plat_dat;
+
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
+
+ ret = reset_control_assert(gbeth->rstc);
+ if (ret)
+ dev_err(gbeth->dev, "Reset assert failed\n");
+}
+
+static int renesas_gbeth_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct renesas_gbeth *gbeth;
+ unsigned int i;
+ int err;
+
+ err = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to get resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ gbeth = devm_kzalloc(dev, sizeof(*gbeth), GFP_KERNEL);
+ if (!gbeth)
+ return -ENOMEM;
+
+ plat_dat->num_clks = ARRAY_SIZE(renesas_gbeth_clks);
+ plat_dat->clks = devm_kcalloc(dev, plat_dat->num_clks,
+ sizeof(*plat_dat->clks), GFP_KERNEL);
+ if (!plat_dat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < plat_dat->num_clks; i++)
+ plat_dat->clks[i].id = renesas_gbeth_clks[i];
+
+ err = devm_clk_bulk_get(dev, plat_dat->num_clks, plat_dat->clks);
+ if (err < 0)
+ return err;
+
+ plat_dat->clk_tx_i = stmmac_pltfr_find_clk(plat_dat, "tx");
+ if (!plat_dat->clk_tx_i)
+ return dev_err_probe(dev, -EINVAL,
+ "error finding tx clock\n");
+
+ gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(gbeth->rstc))
+ return PTR_ERR(gbeth->rstc);
+
+ gbeth->dev = dev;
+ gbeth->plat_dat = plat_dat;
+ plat_dat->bsp_priv = gbeth;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->init = renesas_gbeth_init;
+ plat_dat->exit = renesas_gbeth_exit;
+ plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ STMMAC_FLAG_SPH_DISABLE;
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id renesas_gbeth_match[] = {
+ { .compatible = "renesas,rzv2h-gbeth", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_gbeth_match);
+
+static struct platform_driver renesas_gbeth_driver = {
+ .probe = renesas_gbeth_probe,
+ .driver = {
+ .name = "renesas-gbeth",
+ .of_match_table = renesas_gbeth_match,
+ },
+};
+module_platform_driver(renesas_gbeth_driver);
+
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas GBETH DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
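
The new glue stores a clk_bulk_data array in plat_stmmacenet_data: a fixed name table is converted once in probe, after which init/exit gate all four clocks with a single call each. The same pattern in isolation, with placeholder clock names:

#include <linux/clk.h>
#include <linux/device.h>

static const char *const demo_clk_names[] = { "foo", "bar" }; /* placeholders */

static int demo_get_clocks(struct device *dev, struct clk_bulk_data **out)
{
        struct clk_bulk_data *clks;
        unsigned int i;
        int ret;

        clks = devm_kcalloc(dev, ARRAY_SIZE(demo_clk_names), sizeof(*clks),
                            GFP_KERNEL);
        if (!clks)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(demo_clk_names); i++)
                clks[i].id = demo_clk_names[i];

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(demo_clk_names), clks);
        if (ret)
                return ret;

        *out = clks;
        return 0;
}

clk_bulk_prepare_enable() and clk_bulk_disable_unprepare() then take the count and the array, exactly as renesas_gbeth_init()/renesas_gbeth_exit() do above.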
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 116855658559..72b50f6d72f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -50,6 +50,7 @@ struct socfpga_dwmac {
u32 reg_offset;
u32 reg_shift;
struct device *dev;
+ struct plat_stmmacenet_data *plat_dat;
struct regmap *sys_mgr_base_addr;
struct reset_control *stmmac_rst;
struct reset_control *stmmac_ocp_rst;
@@ -58,17 +59,15 @@ struct socfpga_dwmac {
void __iomem *sgmii_adapter_base;
bool f2h_ptp_ref_clk;
const struct socfpga_dwmac_ops *ops;
- struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode)
+static void socfpga_dwmac_fix_mac_speed(void *bsp_priv, int speed,
+ unsigned int mode)
{
- struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+ struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)bsp_priv;
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dwmac->dev));
void __iomem *splitter_base = dwmac->splitter_base;
void __iomem *sgmii_adapter_base = dwmac->sgmii_adapter_base;
- struct device *dev = dwmac->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct phy_device *phy_dev = ndev->phydev;
u32 val;
if (sgmii_adapter_base)
@@ -95,7 +94,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (phy_dev && sgmii_adapter_base)
+ if ((priv->plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ priv->plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) &&
+ sgmii_adapter_base)
writew(SGMII_ADAPTER_ENABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
}
@@ -233,10 +234,7 @@ err_node_put:
static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
{
- struct net_device *ndev = dev_get_drvdata(dwmac->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return priv->plat->mac_interface;
+ return dwmac->plat_dat->mac_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
@@ -258,6 +256,7 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
*val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
break;
case PHY_INTERFACE_MODE_RMII:
@@ -435,6 +434,13 @@ static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
return priv->hw->phylink_pcs;
}
+static int socfpga_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+{
+ struct socfpga_dwmac *dwmac = bsp_priv;
+
+ return dwmac->ops->set_phy_mode(dwmac);
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -442,8 +448,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
struct socfpga_dwmac *dwmac;
- struct net_device *ndev;
- struct stmmac_priv *stpriv;
const struct socfpga_dwmac_ops *ops;
ops = device_get_match_data(&pdev->dev);
@@ -479,9 +483,17 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ /* The socfpga driver needs to control the stmmac reset to set the phy
+ * mode. Create a copy of the core reset handle so it can be used by
+ * the driver later.
+ */
+ dwmac->stmmac_rst = plat_dat->stmmac_rst;
dwmac->ops = ops;
+ dwmac->plat_dat = plat_dat;
+
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+ plat_dat->init = socfpga_dwmac_init;
plat_dat->pcs_init = socfpga_dwmac_pcs_init;
plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
plat_dat->select_pcs = socfpga_dwmac_select_pcs;
@@ -489,67 +501,9 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->riwt_off = 1;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- return ret;
-
- ndev = platform_get_drvdata(pdev);
- stpriv = netdev_priv(ndev);
-
- /* The socfpga driver needs to control the stmmac reset to set the phy
- * mode. Create a copy of the core reset handle so it can be used by
- * the driver later.
- */
- dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
-
- ret = ops->set_phy_mode(dwmac);
- if (ret)
- goto err_dvr_remove;
-
- return 0;
-
-err_dvr_remove:
- stmmac_dvr_remove(&pdev->dev);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-#ifdef CONFIG_PM_SLEEP
-static int socfpga_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev);
-
- dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- stmmac_bus_clks_config(priv, false);
-
- return 0;
-}
-
-static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- return stmmac_bus_clks_config(priv, true);
-}
-
-static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
- SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
-};
-
static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
.set_phy_mode = socfpga_gen5_set_phy_mode,
};
@@ -567,10 +521,9 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
.probe = socfpga_dwmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
- .pm = &socfpga_dwmac_pm_ops,
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = socfpga_dwmac_match,
},
};
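
For socfpga, set_phy_mode() moves into a plat_dat->init hook so the shared stmmac_pltfr_pm_ops can re-apply it on resume, letting the driver drop its custom system-sleep and runtime PM handlers. The fix_mac_speed() rework is a behaviour change as well: the SGMII adapter used to be re-enabled only when ndev->phydev was set, which a PCS-driven 1000BASE-X link typically never satisfies, so the check now keys on the interface mode. Roughly (illustrative helper):

static bool demo_needs_sgmii_adapter(phy_interface_t iface)
{
        /* No phydev is attached in 1000BASE-X, yet the adapter still
         * needs re-enabling after a speed change in both modes.
         */
        return iface == PHY_INTERFACE_MODE_SGMII ||
               iface == PHY_INTERFACE_MODE_1000BASEX;
}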
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index be57c6c12c1c..53d5ce1f6dc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -23,12 +23,7 @@
#define DWMAC_50MHZ 50000000
-#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
- iface == PHY_INTERFACE_MODE_RGMII_ID || \
- iface == PHY_INTERFACE_MODE_RGMII_RXID || \
- iface == PHY_INTERFACE_MODE_RGMII_TXID)
-
-#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+#define IS_PHY_IF_MODE_GBIT(iface) (phy_interface_mode_is_rgmii(iface) || \
iface == PHY_INTERFACE_MODE_GMII)
/* STiH4xx register definitions (STiH407/STiH410 families)
@@ -148,7 +143,7 @@ static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode)
src = TX_RETIME_SRC_CLKGEN;
freq = DWMAC_50MHZ;
}
- } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ } else if (phy_interface_mode_is_rgmii(dwmac->interface)) {
/* On GiGa clk source can be either ext or from clkgen */
freq = rgmii_clock(spd);
@@ -238,6 +233,29 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
+static int sti_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+ int ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = sti_dwmac_set_mode(dwmac);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+static void sti_dwmac_exit(struct platform_device *pdev, void *bsp_priv)
+{
+ struct sti_dwmac *dwmac = bsp_priv;
+
+ clk_disable_unprepare(dwmac->clk);
+}
+
static int sti_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -274,59 +292,12 @@ static int sti_dwmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = data->fix_retime_src;
+ plat_dat->init = sti_dwmac_init;
+ plat_dat->exit = sti_dwmac_exit;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
-
- ret = sti_dwmac_set_mode(dwmac);
- if (ret)
- goto disable_clk;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto disable_clk;
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static void sti_dwmac_remove(struct platform_device *pdev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-
- stmmac_dvr_remove(&pdev->dev);
-
- clk_disable_unprepare(dwmac->clk);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
-static int sti_dwmac_suspend(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
-
- clk_disable_unprepare(dwmac->clk);
-
- return ret;
-}
-
-static int sti_dwmac_resume(struct device *dev)
-{
- struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-
- clk_prepare_enable(dwmac->clk);
- sti_dwmac_set_mode(dwmac);
-
- return stmmac_resume(dev);
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
- sti_dwmac_resume);
-
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
};
@@ -339,10 +310,9 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match);
static struct platform_driver sti_dwmac_driver = {
.probe = sti_dwmac_probe,
- .remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = pm_sleep_ptr(&sti_dwmac_pm_ops),
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = sti_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index c3d321192581..1eb16eec9c0d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -119,7 +119,7 @@ struct stm32_ops {
u32 syscfg_clr_off;
};
-static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
+static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac)
{
int ret;
@@ -127,11 +127,9 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
if (ret)
goto err_clk_tx;
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) {
- ret = clk_prepare_enable(dwmac->clk_rx);
- if (ret)
- goto err_clk_rx;
- }
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_rx;
ret = clk_prepare_enable(dwmac->syscfg_clk);
if (ret)
@@ -148,15 +146,14 @@ static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
err_clk_eth_ck:
clk_disable_unprepare(dwmac->syscfg_clk);
err_syscfg_clk:
- if (!dwmac->ops->clk_rx_enable_in_suspend || !resume)
- clk_disable_unprepare(dwmac->clk_rx);
+ clk_disable_unprepare(dwmac->clk_rx);
err_clk_rx:
clk_disable_unprepare(dwmac->clk_tx);
err_clk_tx:
return ret;
}
-static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
int ret;
@@ -167,7 +164,7 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
return ret;
}
- return stm32_dwmac_clk_enable(dwmac, resume);
+ return stm32_dwmac_clk_enable(dwmac);
}
static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
@@ -382,12 +379,10 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
SYSCFG_MCU_ETH_MASK, val << 23);
}
-static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend)
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
{
clk_disable_unprepare(dwmac->clk_tx);
- if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend)
- clk_disable_unprepare(dwmac->clk_rx);
-
+ clk_disable_unprepare(dwmac->clk_rx);
clk_disable_unprepare(dwmac->syscfg_clk);
if (dwmac->enable_eth_ck)
clk_disable_unprepare(dwmac->clk_eth_ck);
@@ -541,18 +536,32 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
- ret = stm32_dwmac_init(plat_dat, false);
+ ret = stm32_dwmac_init(plat_dat);
if (ret)
return ret;
+ /* If this platform requires the clock to be running in suspend,
+ * prepare and enable the receive clock an additional time to keep
+ * it running.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend) {
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret)
+ goto err_clk_disable;
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_clk_disable;
+ goto err_clk_disable_suspend;
return 0;
+err_clk_disable_suspend:
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
err_clk_disable:
- stm32_dwmac_clk_disable(dwmac, false);
+ stm32_dwmac_clk_disable(dwmac);
return ret;
}
@@ -565,7 +574,15 @@ static void stm32_dwmac_remove(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
- stm32_dwmac_clk_disable(dwmac, false);
+ /* If this platform requires the clock to be running in suspend,
+ * we need to disable and unprepare the receive clock an additional
+ * time to balance the extra clk_prepare_enable() in the probe
+ * function.
+ */
+ if (dwmac->ops->clk_rx_enable_in_suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
+
+ stm32_dwmac_clk_disable(dwmac);
if (dwmac->irq_pwr_wakeup >= 0) {
dev_pm_clear_wake_irq(&pdev->dev);
@@ -596,7 +613,7 @@ static int stm32_dwmac_suspend(struct device *dev)
if (ret)
return ret;
- stm32_dwmac_clk_disable(dwmac, true);
+ stm32_dwmac_clk_disable(dwmac);
if (dwmac->ops->suspend)
ret = dwmac->ops->suspend(dwmac);
@@ -614,7 +631,7 @@ static int stm32_dwmac_resume(struct device *dev)
if (dwmac->ops->resume)
dwmac->ops->resume(dwmac);
- ret = stm32_dwmac_init(priv->plat, true);
+ ret = stm32_dwmac_init(priv->plat);
if (ret)
return ret;
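
The stm32 change trades the suspend/resume special-casing inside the clock helpers for one extra reference on clk_rx, relying on clk_prepare_enable() being refcounted: while the count stays above zero the clock keeps running across suspend. A compressed walkthrough of the rx-clock lifetime on a clk_rx_enable_in_suspend platform (error handling and the other clocks elided):

static void demo_rx_clk_lifetime(struct stm32_dwmac *dwmac)
{
        clk_prepare_enable(dwmac->clk_rx);      /* probe, stm32_dwmac_init():  0 -> 1 */
        clk_prepare_enable(dwmac->clk_rx);      /* probe, suspend reference:   1 -> 2 */
        clk_disable_unprepare(dwmac->clk_rx);   /* suspend: 2 -> 1, stays on */
        clk_prepare_enable(dwmac->clk_rx);      /* resume:  1 -> 2 */
        clk_disable_unprepare(dwmac->clk_rx);   /* remove, drop extra ref:     2 -> 1 */
        clk_disable_unprepare(dwmac->clk_rx);   /* remove, clk_disable helper: 1 -> 0 */
}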
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 85723a78793a..2796dc426943 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
* address. No need to mask it again.
*/
- reg |= 1 << H3_EPHY_ADDR_SHIFT;
+ reg |= ret << H3_EPHY_ADDR_SHIFT;
} else {
/* For SoCs without internal PHY the PHY selection bit should be
* set to 0 (external PHY).
@@ -1239,14 +1239,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+ ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
if (ret)
goto dwmac_syscon;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto dwmac_exit;
-
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
@@ -1283,9 +1279,7 @@ dwmac_mux:
clk_put(gmac->ephy_clk);
dwmac_remove:
pm_runtime_put_noidle(&pdev->dev);
- stmmac_dvr_remove(&pdev->dev);
-dwmac_exit:
- sun8i_dwmac_exit(pdev, gmac);
+ stmmac_pltfr_remove(pdev);
dwmac_syscon:
sun8i_dwmac_unset_syscon(gmac);
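
The one-line EPHY change above is a genuine bugfix: "ret" holds the PHY address parsed by of_mdio_parse_addr() (0..31), while the old code shifted the constant 1 and therefore programmed address 1 no matter what the devicetree said. Reduced to its essence:

/* Illustration only: the bits actually written into the syscon. */
static u32 demo_ephy_addr_bits(int parsed_addr)
{
        return (u32)parsed_addr << H3_EPHY_ADDR_SHIFT;
}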
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 9f098ff0ff05..1eadcf5d1ad6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -72,28 +72,28 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, int speed, unsigned int mode)
+static int sun7i_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct sunxi_priv_data *gmac = priv;
-
- /* only GMII mode requires us to reconfigure the clock lines */
- if (gmac->interface != PHY_INTERFACE_MODE_GMII)
- return;
-
- if (gmac->clk_enabled) {
- clk_disable(gmac->tx_clk);
- gmac->clk_enabled = 0;
- }
- clk_unprepare(gmac->tx_clk);
-
- if (speed == 1000) {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
- clk_prepare_enable(gmac->tx_clk);
- gmac->clk_enabled = 1;
- } else {
- clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
- clk_prepare(gmac->tx_clk);
+ struct sunxi_priv_data *gmac = bsp_priv;
+
+ if (interface == PHY_INTERFACE_MODE_GMII) {
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
}
+ return 0;
}
static int sun7i_gmac_probe(struct platform_device *pdev)
@@ -140,24 +140,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = gmac;
plat_dat->init = sun7i_gmac_init;
plat_dat->exit = sun7i_gmac_exit;
- plat_dat->fix_mac_speed = sun7i_fix_speed;
+ plat_dat->set_clk_tx_rate = sun7i_set_clk_tx_rate;
plat_dat->tx_fifo_size = 4096;
plat_dat->rx_fifo_size = 16384;
- ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_exit;
-
- return 0;
-
-err_gmac_exit:
- sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id sun7i_dwmac_match[] = {
@@ -168,7 +155,6 @@ MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
static struct platform_driver sun7i_dwmac_driver = {
.probe = sun7i_gmac_probe,
- .remove = stmmac_pltfr_remove,
.driver = {
.name = "sun7i-dwmac",
.pm = &stmmac_pltfr_pm_ops,
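
sun7i's conversion from fix_mac_speed to set_clk_tx_rate moves the GMII check from driver state (gmac->interface) into the callback's interface argument and gives the core a return value to act on. The assumed contract of the hook, reduced to the common 125/25/2.5 MHz tx clock ladder (sun7i itself keeps its own two-rate scheme as shown above):

static int demo_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
                                phy_interface_t interface, int speed)
{
        unsigned long rate = speed == SPEED_1000 ? 125000000 :
                             speed == SPEED_100 ? 25000000 : 2500000;

        return clk_set_rate(clk_tx_i, rate);
}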
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 33cf99797df5..5e6ac82a89b9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -51,21 +51,14 @@ struct visconti_eth {
u32 phy_intf_sel;
struct clk *phy_ref_clk;
struct device *dev;
- spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
+static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct visconti_eth *dwmac = priv;
+ struct visconti_eth *dwmac = bsp_priv;
struct net_device *netdev = dev_get_drvdata(dwmac->dev);
unsigned int val, clk_sel_val = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&dwmac->lock, flags);
-
- /* adjust link */
- val = readl(dwmac->reg + MAC_CTRL_REG);
- val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES);
switch (speed) {
case SPEED_1000:
@@ -77,24 +70,19 @@ static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
- val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
break;
case SPEED_10:
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
- val |= GMAC_CONFIG_PS;
break;
default:
/* No bit control */
netdev_err(netdev, "Unsupported speed request (%d)", speed);
- spin_unlock_irqrestore(&dwmac->lock, flags);
- return;
+ return -EINVAL;
}
- writel(val, dwmac->reg + MAC_CTRL_REG);
-
/* Stop internal clock */
val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
@@ -136,7 +124,7 @@ static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
break;
}
- spin_unlock_irqrestore(&dwmac->lock, flags);
+ return 0;
}
static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
@@ -228,11 +216,10 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = visconti_eth_set_clk_tx_rate;
ret = visconti_eth_clock_probe(pdev, plat_dat);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 42fe29a4e300..f4694fd576f5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -17,11 +17,7 @@
#define GMAC_EXT_CONFIG 0x00000004
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
-#define GMAC_VLAN_TAG 0x00000050
-#define GMAC_VLAN_TAG_DATA 0x00000054
-#define GMAC_VLAN_HASH_TABLE 0x00000058
#define GMAC_RX_FLOW_CTRL 0x00000090
-#define GMAC_VLAN_INCL 0x00000060
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
#define GMAC_TXQ_PRTY_MAP0 0x98
#define GMAC_TXQ_PRTY_MAP1 0x9C
@@ -31,7 +27,6 @@
#define GMAC_RXQ_CTRL3 0x000000ac
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
-#define GMAC_1US_TIC_COUNTER 0x000000dc
#define GMAC_PCS_BASE 0x000000e0
#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
@@ -82,42 +77,6 @@
#define GMAC_MAX_PERFECT_ADDRESSES 128
-/* MAC VLAN */
-#define GMAC_VLAN_EDVLP BIT(26)
-#define GMAC_VLAN_VTHM BIT(25)
-#define GMAC_VLAN_DOVLTC BIT(20)
-#define GMAC_VLAN_ESVL BIT(18)
-#define GMAC_VLAN_ETV BIT(16)
-#define GMAC_VLAN_VID GENMASK(15, 0)
-#define GMAC_VLAN_VLTI BIT(20)
-#define GMAC_VLAN_CSVL BIT(19)
-#define GMAC_VLAN_VLC GENMASK(17, 16)
-#define GMAC_VLAN_VLC_SHIFT 16
-#define GMAC_VLAN_VLHT GENMASK(15, 0)
-
-/* MAC VLAN Tag */
-#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_ETV BIT(16)
-
-/* MAC VLAN Tag Control */
-#define GMAC_VLAN_TAG_CTRL_OB BIT(0)
-#define GMAC_VLAN_TAG_CTRL_CT BIT(1)
-#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
-#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2
-#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
-#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21
-#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24)
-
-#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
-
-/* MAC VLAN Tag Data/Filter */
-#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0)
-#define GMAC_VLAN_TAG_DATA_VEN BIT(16)
-#define GMAC_VLAN_TAG_DATA_ETV BIT(17)
-
/* MAC RX Queue Enable */
#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index cc4ddf608652..9c2549d4100f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -18,6 +18,7 @@
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_pcs.h"
+#include "stmmac_vlan.h"
#include "dwmac4.h"
#include "dwmac5.h"
@@ -448,165 +449,6 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
-static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- u32 val;
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~GMAC_VLAN_TAG_VID;
- val |= GMAC_VLAN_TAG_ETV | vid;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-}
-
-static int dwmac4_write_vlan_filter(struct net_device *dev,
- struct mac_device_info *hw,
- u8 index, u32 data)
-{
- void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- int ret;
- u32 val;
-
- if (index >= hw->num_vlan)
- return -EINVAL;
-
- writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
-
- val = readl(ioaddr + GMAC_VLAN_TAG);
- val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
- GMAC_VLAN_TAG_CTRL_CT |
- GMAC_VLAN_TAG_CTRL_OB);
- val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
-
- writel(val, ioaddr + GMAC_VLAN_TAG);
-
- ret = readl_poll_timeout(ioaddr + GMAC_VLAN_TAG, val,
- !(val & GMAC_VLAN_TAG_CTRL_OB),
- 1000, 500000);
- if (ret) {
- netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int index = -1;
- u32 val = 0;
- int i, ret;
-
- if (vid > 4095)
- return -EINVAL;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- /* For single VLAN filter, VID 0 means VLAN promiscuous */
- if (vid == 0) {
- netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
- return -EPERM;
- }
-
- if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
- netdev_err(dev, "Only single VLAN ID supported\n");
- return -EPERM;
- }
-
- hw->vlan_filter[0] = vid;
- dwmac4_write_single_vlan(dev, vid);
-
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
-
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] == val)
- return 0;
- else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
- index = i;
- }
-
- if (index == -1) {
- netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
- hw->num_vlan);
- return -EPERM;
- }
-
- ret = dwmac4_write_vlan_filter(dev, hw, index, val);
-
- if (!ret)
- hw->vlan_filter[index] = val;
-
- return ret;
-}
-
-static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid)
-{
- int i, ret = 0;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
- hw->vlan_filter[0] = 0;
- dwmac4_write_single_vlan(dev, 0);
- }
- return 0;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
- ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
-
- if (!ret)
- hw->vlan_filter[i] = 0;
- else
- return ret;
- }
- }
-
- return ret;
-}
-
-static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
- struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- u32 hash;
- u32 val;
- int i;
-
- /* Single Rx VLAN Filter */
- if (hw->num_vlan == 1) {
- dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
- return;
- }
-
- /* Extended Rx VLAN Filter Enable */
- for (i = 0; i < hw->num_vlan; i++) {
- if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
- val = hw->vlan_filter[i];
- dwmac4_write_vlan_filter(dev, hw, i, val);
- }
- }
-
- hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
- if (hash & GMAC_VLAN_VLHT) {
- value = readl(ioaddr + GMAC_VLAN_TAG);
- value |= GMAC_VLAN_VTHM;
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
@@ -965,45 +807,6 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
-
- value = readl(ioaddr + GMAC_VLAN_TAG);
-
- if (hash) {
- value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = GMAC_VLAN_ETV;
-
- if (is_double) {
- value |= GMAC_VLAN_EDVLP;
- value |= GMAC_VLAN_ESVL;
- value |= GMAC_VLAN_DOVLTC;
- }
-
- writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
- } else {
- value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
- value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
- value &= ~GMAC_VLAN_DOVLTC;
- value &= ~GMAC_VLAN_VID;
-
- writel(value, ioaddr + GMAC_VLAN_TAG);
- }
-}
-
static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
u32 value = readl(ioaddr + GMAC_CONFIG);
@@ -1014,19 +817,6 @@ static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + GMAC_CONFIG);
}
-static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + GMAC_VLAN_INCL);
- value |= GMAC_VLAN_VLTI;
- value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~GMAC_VLAN_VLC;
- value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
- writel(value, ioaddr + GMAC_VLAN_INCL);
-}
-
static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
u32 addr)
{
@@ -1143,35 +933,6 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return 0;
}
-static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
- struct dma_desc *rx_desc, struct sk_buff *skb)
-{
- if (hw->desc->get_rx_vlan_valid(rx_desc)) {
- u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
-
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
- }
-}
-
-static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
- value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
-
- if (hw->hw_vlan_en)
- /* Always strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_ALL;
- else
- /* Do not strip VLAN on Receive */
- value |= GMAC_VLAN_TAG_STRIP_NONE;
-
- /* Enable outer VLAN Tag in Rx DMA descriptor */
- value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
- writel(value, ioaddr + GMAC_VLAN_TAG);
-}
-
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.update_caps = dwmac4_update_caps,
@@ -1201,17 +962,10 @@ const struct stmmac_ops dwmac4_ops = {
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac410_ops = {
@@ -1244,18 +998,11 @@ const struct stmmac_ops dwmac410_ops = {
.set_filter = dwmac4_set_filter,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1292,51 +1039,13 @@ const struct stmmac_ops dwmac510_ops = {
.rxp_config = dwmac5_rxp_config,
.flex_pps_config = dwmac5_flex_pps_config,
.set_mac_loopback = dwmac4_set_mac_loopback,
- .update_vlan_hash = dwmac4_update_vlan_hash,
.sarc_configure = dwmac4_sarc_configure,
- .enable_vlan = dwmac4_enable_vlan,
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
- .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
- .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
- .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
- .rx_hw_vlan = dwmac4_rx_hw_vlan,
- .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
-static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
-{
- u32 val, num_vlan;
-
- val = readl(ioaddr + GMAC_HW_FEATURE3);
- switch (val & GMAC_HW_FEAT_NRVF) {
- case 0:
- num_vlan = 1;
- break;
- case 1:
- num_vlan = 4;
- break;
- case 2:
- num_vlan = 8;
- break;
- case 3:
- num_vlan = 16;
- break;
- case 4:
- num_vlan = 24;
- break;
- case 5:
- num_vlan = 32;
- break;
- default:
- num_vlan = 1;
- }
-
- return num_vlan;
-}
-
int dwmac4_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1368,7 +1077,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(20, 16);
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
- mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
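
The per-core VLAN code (filters, hash, stripping, tag insertion) is consolidated into the new stmmac_vlan helpers, leaving dwmac4 with just the stmmac_get_num_vlan() call. For reference, the deleted dwmac4_get_num_vlan() decoded the NRVF field of GMAC_HW_FEATURE3 into a filter count; a table-driven sketch of the same mapping, assuming the common helper behaves identically:

static u32 demo_get_num_vlan(void __iomem *ioaddr)
{
        /* NRVF encoding: 0 -> 1 filter, 1 -> 4, 2 -> 8, 3 -> 16,
         * 4 -> 24, 5 -> 32; anything else falls back to 1.
         */
        static const u32 nrvf_to_count[] = { 1, 4, 8, 16, 24, 32 };
        u32 val = readl(ioaddr + GMAC_HW_FEATURE3) & GMAC_HW_FEAT_NRVF;

        return val < ARRAY_SIZE(nrvf_to_count) ? nrvf_to_count[val] : 1;
}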
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a03f5d771566..0d408ee17f33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -57,19 +57,6 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
-#define XGMAC_VLAN_TAG 0x00000050
-#define XGMAC_VLAN_EDVLP BIT(26)
-#define XGMAC_VLAN_VTHM BIT(25)
-#define XGMAC_VLAN_DOVLTC BIT(20)
-#define XGMAC_VLAN_ESVL BIT(18)
-#define XGMAC_VLAN_ETV BIT(16)
-#define XGMAC_VLAN_VID GENMASK(15, 0)
-#define XGMAC_VLAN_HASH_TABLE 0x00000058
-#define XGMAC_VLAN_INCL 0x00000060
-#define XGMAC_VLAN_VLTI BIT(20)
-#define XGMAC_VLAN_CSVL BIT(19)
-#define XGMAC_VLAN_VLC GENMASK(17, 16)
-#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -477,6 +464,7 @@
#define XGMAC_RDES3_RSV BIT(26)
#define XGMAC_RDES3_L34T GENMASK(23, 20)
#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_RDES3_ET_LT GENMASK(19, 16)
#define XGMAC_L34T_IP4TCP 0x1
#define XGMAC_L34T_IP4UDP 0x2
#define XGMAC_L34T_IP6TCP 0x9
@@ -486,6 +474,17 @@
#define XGMAC_RDES3_TSD BIT(6)
#define XGMAC_RDES3_TSA BIT(4)
+/* RDES0 (write back format) */
+#define XGMAC_RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* Error Type or L2 Type(ET/LT) Field Number */
+#define XGMAC_ET_LT_VLAN_STAG 8
+#define XGMAC_ET_LT_VLAN_CTAG 9
+#define XGMAC_ET_LT_DVLAN_CTAG_CTAG 10
+#define XGMAC_ET_LT_DVLAN_STAG_STAG 11
+#define XGMAC_ET_LT_DVLAN_CTAG_STAG 12
+#define XGMAC_ET_LT_DVLAN_STAG_CTAG 13
+
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
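
Alongside the VLAN register block moving out (to stmmac_vlan.h, per the includes added below), the header gains the write-back descriptor fields needed for hardware VLAN extraction on XGMAC: the tag in RDES0 and the ET/LT type code in RDES3, whose values 8 through 13 all denote a single- or double-tagged frame. An illustrative decode, presumably close to what the shared rx VLAN path checks:

static bool demo_rdes3_is_vlan(u32 rdes3)
{
        u32 et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, rdes3);

        return et_lt >= XGMAC_ET_LT_VLAN_STAG &&
               et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG;
}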
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index a6d395c6bacd..6cadf8de4fdf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -10,6 +10,7 @@
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
+#include "stmmac_vlan.h"
#include "dwxlgmac2.h"
#include "dwxgmac2.h"
@@ -614,76 +615,6 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
return 0;
}
-static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double)
-{
- void __iomem *ioaddr = hw->pcsr;
-
- writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
-
- if (hash) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- } else if (perfect_match) {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value |= XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~XGMAC_VLAN_VTHM;
- value |= XGMAC_VLAN_ETV;
- if (is_double) {
- value |= XGMAC_VLAN_EDVLP;
- value |= XGMAC_VLAN_ESVL;
- value |= XGMAC_VLAN_DOVLTC;
- } else {
- value &= ~XGMAC_VLAN_EDVLP;
- value &= ~XGMAC_VLAN_ESVL;
- value &= ~XGMAC_VLAN_DOVLTC;
- }
-
- value &= ~XGMAC_VLAN_VID;
- writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
- } else {
- u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
-
- value &= ~XGMAC_FILTER_VTFE;
-
- writel(value, ioaddr + XGMAC_PACKET_FILTER);
-
- value = readl(ioaddr + XGMAC_VLAN_TAG);
-
- value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
- value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
- value &= ~XGMAC_VLAN_DOVLTC;
- value &= ~XGMAC_VLAN_VID;
-
- writel(value, ioaddr + XGMAC_VLAN_TAG);
- }
-}
-
struct dwxgmac3_error_desc {
bool valid;
const char *desc;
@@ -1300,19 +1231,6 @@ static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
writel(value, ioaddr + XGMAC_TX_CONFIG);
}
-static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
-
- value = readl(ioaddr + XGMAC_VLAN_INCL);
- value |= XGMAC_VLAN_VLTI;
- value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
- value &= ~XGMAC_VLAN_VLC;
- value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
- writel(value, ioaddr + XGMAC_VLAN_INCL);
-}
-
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -1534,12 +1452,10 @@ const struct stmmac_ops dwxgmac210_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
@@ -1590,12 +1506,10 @@ const struct stmmac_ops dwxlgmac2_ops = {
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
- .update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
- .enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
@@ -1638,6 +1552,7 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(15, 0);
mac->mii.clk_csr_shift = 19;
mac->mii.clk_csr_mask = GENMASK(21, 19);
+ mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 389aad7b5c1e..a2980482fcce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -4,6 +4,7 @@
* stmmac XGMAC support.
*/
+#include <linux/bitfield.h>
#include <linux/stmmac.h>
#include "common.h"
#include "dwxgmac2.h"
@@ -69,6 +70,21 @@ static int dwxgmac2_get_tx_ls(struct dma_desc *p)
return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
}
+static u16 dwxgmac2_wrback_get_rx_vlan_tci(struct dma_desc *p)
+{
+ return le32_to_cpu(p->des0) & XGMAC_RDES0_VLAN_TAG_MASK;
+}
+
+static bool dwxgmac2_wrback_get_rx_vlan_valid(struct dma_desc *p)
+{
+ u32 et_lt;
+
+ et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, le32_to_cpu(p->des3));
+
+ return et_lt >= XGMAC_ET_LT_VLAN_STAG &&
+ et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG;
+}
+
static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
@@ -351,6 +367,8 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.set_tx_owner = dwxgmac2_set_tx_owner,
.set_rx_owner = dwxgmac2_set_rx_owner,
.get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_vlan_tci = dwxgmac2_wrback_get_rx_vlan_tci,
+ .get_rx_vlan_valid = dwxgmac2_wrback_get_rx_vlan_valid,
.get_rx_frame_len = dwxgmac2_get_rx_frame_len,
.enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
.get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
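The ET/LT code space makes double-tagged frames distinguishable from single-tagged ones: values 8 and 9 are single S- and C-tags, while 10 through 13 enumerate the four double-tag orderings. A hedged sketch of a classifier built on the same defines (the helper name is hypothetical, not part of this patch):

static bool sketch_rx_is_double_tagged(struct dma_desc *p)
{
	u32 et_lt = FIELD_GET(XGMAC_RDES3_ET_LT, le32_to_cpu(p->des3));

	/* 10..13 cover CTAG+CTAG, STAG+STAG, CTAG+STAG and STAG+CTAG */
	return et_lt >= XGMAC_ET_LT_DVLAN_CTAG_CTAG &&
	       et_lt <= XGMAC_ET_LT_DVLAN_STAG_CTAG;
}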
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 31bdbab9a46c..99635b37044a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -9,6 +9,7 @@
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
+#include "stmmac_vlan.h"
#include "dwmac4_descs.h"
#include "dwxgmac2.h"
@@ -120,6 +121,7 @@ static const struct stmmac_hwif_entry {
const void *tc;
const void *mmc;
const void *est;
+ const void *vlan;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
@@ -175,6 +177,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac4_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
@@ -197,6 +200,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -219,6 +223,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac410_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -241,6 +246,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
.mac = &dwmac510_ops,
+ .vlan = &dwmac_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = &dwmac4_ring_mode_ops,
@@ -264,6 +270,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxgmac210_ops,
+ .vlan = &dwxgmac210_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
@@ -287,6 +294,7 @@ static const struct stmmac_hwif_entry {
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
.mac = &dwxlgmac2_ops,
+ .vlan = &dwxlgmac2_vlan_ops,
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
@@ -368,6 +376,7 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->tc = mac->tc ? : entry->tc;
mac->mmc = mac->mmc ? : entry->mmc;
mac->est = mac->est ? : entry->est;
+ mac->vlan = mac->vlan ? : entry->vlan;
priv->hw = mac;
priv->fpe_cfg.reg = entry->regs.fpe_reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 27c63a9fc163..ae4efffb785f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -398,21 +398,6 @@ struct stmmac_ops {
/* RSS */
int (*rss_configure)(struct mac_device_info *hw,
struct stmmac_rss *cfg, u32 num_rxq);
- /* VLAN */
- void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- u16 perfect_match, bool is_double);
- void (*enable_vlan)(struct mac_device_info *hw, u32 type);
- void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
- struct sk_buff *skb);
- void (*set_hw_vlan_mode)(struct mac_device_info *hw);
- int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw,
- __be16 proto, u16 vid);
- void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
- struct mac_device_info *hw);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
/* Source Address Insertion / Replacement */
@@ -498,20 +483,6 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
#define stmmac_rss_configure(__priv, __args...) \
stmmac_do_callback(__priv, mac, rss_configure, __args)
-#define stmmac_update_vlan_hash(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
-#define stmmac_enable_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
-#define stmmac_rx_hw_vlan(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
-#define stmmac_set_hw_vlan_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
-#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
-#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
-#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
#define stmmac_sarc_configure(__priv, __args...) \
@@ -659,6 +630,39 @@ struct stmmac_est_ops {
#define stmmac_est_irq_status(__priv, __args...) \
stmmac_do_void_callback(__priv, est, irq_status, __args)
+struct stmmac_vlan_ops {
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
+ struct sk_buff *skb);
+ void (*set_hw_vlan_mode)(struct mac_device_info *hw);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw);
+};
+
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, enable_vlan, __args)
+#define stmmac_rx_hw_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, rx_hw_vlan, __args)
+#define stmmac_set_hw_vlan_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, set_hw_vlan_mode, __args)
+#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, add_hw_vlan_rx_fltr, __args)
+#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, vlan, del_hw_vlan_rx_fltr, __args)
+#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, vlan, restore_hw_vlan_rx_fltr, __args)
+
struct stmmac_regs_off {
const struct stmmac_fpe_reg *fpe_reg;
u32 ptp_off;
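The wrapper macros keep their names, so callers are untouched; only the ops table they dispatch through changes from mac to vlan. Assuming stmmac_do_void_callback() follows its usual NULL-checking convention, stmmac_enable_vlan(priv, hw, type) effectively reduces to a sketch like:

/* Hedged expansion sketch; the real macro also reports a missing
 * callback, this only shows where the indirection now lands. */
static inline void sketch_enable_vlan(struct stmmac_priv *priv,
				      struct mac_device_info *hw, u32 type)
{
	if (priv->hw->vlan && priv->hw->vlan->enable_vlan)
		priv->hw->vlan->enable_vlan(hw, type);
}

Cores that leave a callback unset (dwxlgmac2, for instance, fills only update_vlan_hash and enable_vlan) simply fall through as no-ops.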
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index bddfa0f4aa21..cda09cf5dcca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -149,21 +149,9 @@ struct stmmac_channel {
};
struct stmmac_fpe_cfg {
- /* Serialize access to MAC Merge state between ethtool requests
- * and link state updates.
- */
- spinlock_t lock;
-
+ struct ethtool_mmsv mmsv;
const struct stmmac_fpe_reg *reg;
- u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
-
- enum ethtool_mm_verify_status status;
- struct timer_list verify_timer;
- bool verify_enabled;
- int verify_retries;
- bool pmac_enabled;
- u32 verify_time;
- bool tx_enabled;
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
};
struct stmmac_tc_entry {
@@ -313,7 +301,7 @@ struct stmmac_priv {
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 918a32f8fda8..f702f7b7bf9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -37,7 +37,7 @@
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int sizeof_stat;
int stat_offset;
};
@@ -1210,36 +1210,16 @@ static int stmmac_get_mm(struct net_device *ndev,
struct ethtool_mm_state *state)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned long flags;
u32 frag_size;
if (!stmmac_fpe_supported(priv))
return -EOPNOTSUPP;
- spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
-
- state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- state->verify_enabled = priv->fpe_cfg.verify_enabled;
- state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
- state->verify_time = priv->fpe_cfg.verify_time;
- state->tx_enabled = priv->fpe_cfg.tx_enabled;
- state->verify_status = priv->fpe_cfg.status;
state->rx_min_frag_size = ETH_ZLEN;
-
- /* FPE active if common tx_enabled and
- * (verification success or disabled(forced))
- */
- if (state->tx_enabled &&
- (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
- state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
- state->tx_active = true;
- else
- state->tx_active = false;
-
frag_size = stmmac_fpe_get_add_frag_size(priv);
state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
- spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+ ethtool_mmsv_get_mm(&priv->fpe_cfg.mmsv, state);
return 0;
}
@@ -1248,8 +1228,6 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
u32 frag_size;
int err;
@@ -1258,23 +1236,8 @@ static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
if (err)
return err;
- /* Wait for the verification that's currently in progress to finish */
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- fpe_cfg->verify_enabled = cfg->verify_enabled;
- fpe_cfg->pmac_enabled = cfg->pmac_enabled;
- fpe_cfg->verify_time = cfg->verify_time;
- fpe_cfg->tx_enabled = cfg->tx_enabled;
-
- if (!cfg->verify_enabled)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
-
stmmac_fpe_set_add_frag_size(priv, frag_size);
- stmmac_fpe_apply(priv);
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+ ethtool_mmsv_set_mm(&priv->fpe_cfg.mmsv, cfg);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
index 3a4bee029c7f..75b470ee621a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
@@ -27,12 +27,6 @@
#define STMMAC_MAC_FPE_CTRL_STS_SVER BIT(1)
#define STMMAC_MAC_FPE_CTRL_STS_EFPE BIT(0)
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
struct stmmac_fpe_reg {
const u32 mac_fpe_reg; /* offset of MAC_FPE_CTRL_STS */
const u32 mtl_fpe_reg; /* offset of MTL_FPE_CTRL_STS */
@@ -48,10 +42,10 @@ bool stmmac_fpe_supported(struct stmmac_priv *priv)
priv->hw->mac->fpe_map_preemption_class;
}
-static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
- bool pmac_enable)
+static void stmmac_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
{
- struct stmmac_fpe_cfg *cfg = &priv->fpe_cfg;
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
const struct stmmac_fpe_reg *reg = cfg->reg;
u32 num_rxq = priv->plat->rx_queues_to_use;
void __iomem *ioaddr = priv->ioaddr;
@@ -68,6 +62,15 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
cfg->fpe_csr = 0;
}
writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);
+}
+
+static void stmmac_fpe_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enable)
+{
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
+ void __iomem *ioaddr = priv->ioaddr;
+ u32 value;
value = readl(ioaddr + reg->int_en_reg);
@@ -85,47 +88,45 @@ static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
writel(value, ioaddr + reg->int_en_reg);
}
-static void stmmac_fpe_send_mpacket(struct stmmac_priv *priv,
- enum stmmac_mpacket_type type)
+static void stmmac_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
+ enum ethtool_mpacket type)
{
- const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
+ struct stmmac_fpe_cfg *cfg = container_of(mmsv, struct stmmac_fpe_cfg, mmsv);
+ struct stmmac_priv *priv = container_of(cfg, struct stmmac_priv, fpe_cfg);
+ const struct stmmac_fpe_reg *reg = cfg->reg;
void __iomem *ioaddr = priv->ioaddr;
- u32 value = priv->fpe_cfg.fpe_csr;
+ u32 value = cfg->fpe_csr;
- if (type == MPACKET_VERIFY)
+ if (type == ETHTOOL_MPACKET_VERIFY)
value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
- else if (type == MPACKET_RESPONSE)
+ else if (type == ETHTOOL_MPACKET_RESPONSE)
value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;
writel(value, ioaddr + reg->mac_fpe_reg);
}
+static const struct ethtool_mmsv_ops stmmac_mmsv_ops = {
+ .configure_tx = stmmac_fpe_configure_tx,
+ .configure_pmac = stmmac_fpe_configure_pmac,
+ .send_mpacket = stmmac_fpe_send_mpacket,
+};
+
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ struct ethtool_mmsv *mmsv = &fpe_cfg->mmsv;
- /* This is interrupt context, just spin_lock() */
- spin_lock(&fpe_cfg->lock);
-
- if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
- goto unlock_out;
+ if (status == FPE_EVENT_UNKNOWN)
+ return;
- /* LP has sent verify mPacket */
if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
- stmmac_fpe_send_mpacket(priv, MPACKET_RESPONSE);
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET);
- /* Local has sent verify mPacket */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET);
- /* LP has sent response mPacket */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
- fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
-
-unlock_out:
- spin_unlock(&fpe_cfg->lock);
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ ethtool_mmsv_event_handle(mmsv, ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET);
}
void stmmac_fpe_irq_status(struct stmmac_priv *priv)
@@ -164,119 +165,16 @@ void stmmac_fpe_irq_status(struct stmmac_priv *priv)
stmmac_fpe_event_status(priv, status);
}
-/**
- * stmmac_fpe_verify_timer - Timer for MAC Merge verification
- * @t: timer_list struct containing private info
- *
- * Verify the MAC Merge capability in the local TX direction, by
- * transmitting Verify mPackets up to 3 times. Wait until link
- * partner responds with a Response mPacket, otherwise fail.
- */
-static void stmmac_fpe_verify_timer(struct timer_list *t)
-{
- struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
- struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
- fpe_cfg);
- unsigned long flags;
- bool rearm = false;
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- switch (fpe_cfg->status) {
- case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
- case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
- if (fpe_cfg->verify_retries != 0) {
- stmmac_fpe_send_mpacket(priv, MPACKET_VERIFY);
- rearm = true;
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
- }
-
- fpe_cfg->verify_retries--;
- break;
-
- case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
- stmmac_fpe_configure(priv, true, true);
- break;
-
- default:
- break;
- }
-
- if (rearm) {
- mod_timer(&fpe_cfg->verify_timer,
- jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
-static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
-{
- if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
- fpe_cfg->verify_enabled &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
- fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
- timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
- mod_timer(&fpe_cfg->verify_timer, jiffies);
- }
-}
-
void stmmac_fpe_init(struct stmmac_priv *priv)
{
- priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
- priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
- priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
- timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
- spin_lock_init(&priv->fpe_cfg.lock);
+ ethtool_mmsv_init(&priv->fpe_cfg.mmsv, priv->dev,
+ &stmmac_mmsv_ops);
if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
priv->dma_cap.fpesel)
dev_info(priv->device, "FPE is not supported by driver.\n");
}
-void stmmac_fpe_apply(struct stmmac_priv *priv)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
-
- /* If verification is disabled, configure FPE right away.
- * Otherwise let the timer code do it.
- */
- if (!fpe_cfg->verify_enabled) {
- stmmac_fpe_configure(priv, fpe_cfg->tx_enabled,
- fpe_cfg->pmac_enabled);
- } else {
- fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
- fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
-
- if (netif_running(priv->dev))
- stmmac_fpe_verify_timer_arm(fpe_cfg);
- }
-}
-
-void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
-{
- struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- unsigned long flags;
-
- timer_shutdown_sync(&fpe_cfg->verify_timer);
-
- spin_lock_irqsave(&fpe_cfg->lock, flags);
-
- if (is_up && fpe_cfg->pmac_enabled) {
- /* VERIFY process requires pmac enabled when NIC comes up */
- stmmac_fpe_configure(priv, false, true);
-
- /* New link => maybe new partner => new verification process */
- stmmac_fpe_apply(priv);
- } else {
- /* No link => turn off EFPE */
- stmmac_fpe_configure(priv, false, false);
- }
-
- spin_unlock_irqrestore(&fpe_cfg->lock, flags);
-}
-
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
{
const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
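Because the mmsv core calls back with only a struct ethtool_mmsv *, each callback recovers the driver context with two container_of() steps, as stmmac_fpe_configure_tx() and friends do above. The round trip could be factored into one helper; a sketch with a hypothetical name:

static struct stmmac_priv *sketch_mmsv_to_priv(struct ethtool_mmsv *mmsv)
{
	struct stmmac_fpe_cfg *cfg =
		container_of(mmsv, struct stmmac_fpe_cfg, mmsv);

	return container_of(cfg, struct stmmac_priv, fpe_cfg);
}

The verify timer, retry counting and status tracking that the removed stmmac_fpe_verify_timer() implemented now live behind ethtool_mmsv_event_handle() and ethtool_mmsv_link_state_handle().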
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
index b884eac7142d..3fc46acf7001 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h
@@ -9,15 +9,10 @@
#include <linux/types.h>
#include <linux/netdevice.h>
-#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
-#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
-
struct stmmac_priv;
-void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up);
bool stmmac_fpe_supported(struct stmmac_priv *priv);
void stmmac_fpe_init(struct stmmac_priv *priv);
-void stmmac_fpe_apply(struct stmmac_priv *priv);
void stmmac_fpe_irq_status(struct stmmac_priv *priv);
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv);
void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 59d07d0d3369..085c09039af4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -568,18 +568,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
/**
* stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
+ * @extack: netlink extended ack structure for error reporting.
* Description:
* This function configures the MAC to enable/disable both outgoing (TX)
* and incoming (RX) packet timestamping based on user input.
* Return Value:
* 0 on success and an appropriate -ve integer on failure.
*/
-static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config config;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
u32 ptp_over_ipv4_udp = 0;
@@ -590,34 +591,36 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 ts_event_en = 0;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
- netdev_alert(priv->dev, "No support for HW time stamping\n");
+ NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
return -EOPNOTSUPP;
}
- if (copy_from_user(&config, ifr->ifr_data,
- sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change timestamping configuration while down");
+ return -ENODEV;
+ }
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
- __func__, config.flags, config.tx_type, config.rx_filter);
+ __func__, config->flags, config->tx_type, config->rx_filter);
- if (config.tx_type != HWTSTAMP_TX_OFF &&
- config.tx_type != HWTSTAMP_TX_ON)
+ if (config->tx_type != HWTSTAMP_TX_OFF &&
+ config->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
if (priv->adv_ts) {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* time stamp no incoming packet at all */
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* 'xmac' hardware can support Sync, Pdelay_Req and
* Pdelay_resp by setting bit 14 and bits 17/16 to 01
* This leaves Delay_Req timestamps out.
@@ -631,7 +634,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
/* PTP v1, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -641,7 +644,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
/* PTP v1, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -652,7 +655,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
/* PTP v2, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
@@ -663,7 +666,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
/* PTP v2, UDP, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -674,7 +677,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
/* PTP v2, UDP, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -686,7 +689,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
/* PTP v2/802.AS1 any layer, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
if (priv->synopsys_id < DWMAC_CORE_4_10)
@@ -698,7 +701,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
/* PTP v2/802.AS1, any layer, Sync packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for SYNC messages only */
ts_event_en = PTP_TCR_TSEVNTENA;
@@ -710,7 +713,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* PTP v2/802.AS1, any layer, Delay_req packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for Delay_Req messages only */
ts_master_en = PTP_TCR_TSMSTRENA;
@@ -724,7 +727,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
/* time stamp any incoming packet */
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
tstamp_all = PTP_TCR_TSENALL;
break;
@@ -732,18 +735,18 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
} else {
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
break;
default:
/* PTP v1, UDP, any kind of event packet */
- config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
}
}
- priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
- priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+ priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
+ priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
priv->systime_flags = STMMAC_HWTS_ACTIVE;
@@ -756,31 +759,30 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
- memcpy(&priv->tstamp_config, &config, sizeof(config));
+ priv->tstamp_config = *config;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
/**
* stmmac_hwtstamp_get - read hardware timestamping.
* @dev: device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
- * a proprietary structure used to pass information to the driver.
+ * @config: the timestamping configuration.
* Description:
* This function obtains the current hardware timestamping settings
* as requested.
*/
-static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int stmmac_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct hwtstamp_config *config = &priv->tstamp_config;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, config,
- sizeof(*config)) ? -EFAULT : 0;
+ *config = priv->tstamp_config;
+
+ return 0;
}
/**
@@ -946,7 +948,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
stmmac_set_eee_pls(priv, priv->hw, false);
if (stmmac_fpe_supported(priv))
- stmmac_fpe_link_state_handle(priv, false);
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
@@ -1064,7 +1066,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_set_eee_pls(priv, priv->hw, true);
if (stmmac_fpe_supported(priv))
- stmmac_fpe_link_state_handle(priv, true);
+ ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
stmmac_hwtstamp_correct_latency(priv, priv);
@@ -1258,20 +1260,22 @@ static int stmmac_init_phy(struct net_device *dev)
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
- int mode = priv->plat->phy_interface;
+ struct phylink_config *config;
struct fwnode_handle *fwnode;
struct phylink_pcs *pcs;
struct phylink *phylink;
- priv->phylink_config.dev = &priv->dev->dev;
- priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.mac_managed_pm = true;
+ config = &priv->phylink_config;
+
+ config->dev = &priv->dev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_managed_pm = true;
/* Stmmac always requires an RX clock for hardware initialization */
- priv->phylink_config.mac_requires_rxc = true;
+ config->mac_requires_rxc = true;
if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
- priv->phylink_config.eee_rx_clk_stop_enable = true;
+ config->eee_rx_clk_stop_enable = true;
/* Set the default transmit clock stop bit based on the platform glue */
priv->tx_lpi_clk_stop = priv->plat->flags &
@@ -1279,13 +1283,22 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
- priv->phylink_config.default_an_inband =
- mdio_bus_data->default_an_inband;
+ config->default_an_inband = mdio_bus_data->default_an_inband;
- /* Set the platform/firmware specified interface mode. Note, phylink
- * deals with the PHY interface mode, not the MAC interface mode.
+ /* Get the PHY interface modes (at the PHY end of the link) that
+ * are supported by the platform.
*/
- __set_bit(mode, priv->phylink_config.supported_interfaces);
+ if (priv->plat->get_interfaces)
+ priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
+ config->supported_interfaces);
+
+ /* Set the platform/firmware specified interface mode if the
+ * supported interfaces have not already been provided using
+ * phy_interface as a last resort.
+ */
+ if (phy_interface_empty(config->supported_interfaces))
+ __set_bit(priv->plat->phy_interface,
+ config->supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
@@ -1294,29 +1307,27 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
pcs = priv->hw->phylink_pcs;
if (pcs)
- phy_interface_or(priv->phylink_config.supported_interfaces,
- priv->phylink_config.supported_interfaces,
+ phy_interface_or(config->supported_interfaces,
+ config->supported_interfaces,
pcs->supported_interfaces);
if (priv->dma_cap.eee) {
/* Assume all supported interfaces also support LPI */
- memcpy(priv->phylink_config.lpi_interfaces,
- priv->phylink_config.supported_interfaces,
- sizeof(priv->phylink_config.lpi_interfaces));
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
/* All full duplex speeds above 100Mbps are supported */
- priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
- MAC_100FD;
- priv->phylink_config.lpi_timer_default = eee_timer * 1000;
- priv->phylink_config.eee_enabled_default = true;
+ config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
+ config->lpi_timer_default = eee_timer * 1000;
+ config->eee_enabled_default = true;
}
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
- phylink = phylink_create(&priv->phylink_config, fwnode,
- mode, &stmmac_phylink_mac_ops);
+ phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
+ &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
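The interface mask is now populated in priority order: a platform get_interfaces() callback first, the single phy_interface bit only as a fallback, then any PCS capabilities ORed in. A hypothetical glue-driver callback could look like this (signature inferred from the call site above; not part of this patch):

static void sketch_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
				  unsigned long *interfaces)
{
	/* Advertise every PHY-side mode this board can switch between */
	__set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
	__set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
}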
@@ -4152,7 +4163,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
if (stmmac_fpe_supported(priv))
- timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
pm_runtime_put(priv->device);
@@ -4488,8 +4499,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
priv->hwts_tx_en)) {
/* declare that device is doing timestamping */
@@ -4522,6 +4531,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4765,8 +4775,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
- skb_tx_timestamp(skb);
-
/* Ready to fill the first descriptor and set the OWN bit w/o any
* problems because all the descriptors are actually ready to be
* passed to the DMA engine.
@@ -4813,7 +4821,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
-
+ skb_tx_timestamp(skb);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -6219,12 +6227,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCSMIIREG:
ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
break;
- case SIOCSHWTSTAMP:
- ret = stmmac_hwtstamp_set(dev, rq);
- break;
- case SIOCGHWTSTAMP:
- ret = stmmac_hwtstamp_get(dev, rq);
- break;
default:
break;
}
@@ -7163,6 +7165,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_bpf = stmmac_bpf,
.ndo_xdp_xmit = stmmac_xdp_xmit,
.ndo_xsk_wakeup = stmmac_xsk_wakeup,
+ .ndo_hwtstamp_get = stmmac_hwtstamp_get,
+ .ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -7644,7 +7648,7 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
- if (priv->plat->has_gmac4) {
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
priv->hw->hw_vlan_en = true;
}
@@ -7727,9 +7731,6 @@ int stmmac_dvr_probe(struct device *device,
goto error_mdio_register;
}
- if (priv->plat->speed_mode_2500)
- priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
-
ret = stmmac_pcs_setup(ndev);
if (ret)
goto error_pcs_setup;
@@ -7871,7 +7872,7 @@ int stmmac_suspend(struct device *dev)
rtnl_unlock();
if (stmmac_fpe_supported(priv))
- timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+ ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
return 0;
}
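With ndo_hwtstamp_get/set wired up, the SIOCSHWTSTAMP/SIOCGHWTSTAMP cases disappear from stmmac_ioctl(): the core now performs the copy_from_user()/copy_to_user() itself and hands the driver a kernel-space config plus an extack for error reporting. A minimal sketch of the contract a converted driver implements (names hypothetical):

static int sketch_hwtstamp_set(struct net_device *dev,
			       struct kernel_hwtstamp_config *config,
			       struct netlink_ext_ack *extack)
{
	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* ... program the hardware, then cache the accepted config ... */
	return 0;
}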
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index c73eff6a56b8..43c869f64c39 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -709,6 +709,17 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name)
+{
+ for (int i = 0; i < plat_dat->num_clks; i++)
+ if (strcmp(plat_dat->clks[i].id, name) == 0)
+ return plat_dat->clks[i].clk;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_find_clk);
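A linear scan is sufficient here since num_clks is small. Glue drivers would use the helper to pick a named clock out of the bulk array; a hedged usage sketch (the "ptp_ref" name and the rate are illustrative assumptions):

static int sketch_setup_ptp_clk(struct plat_stmmacenet_data *plat_dat)
{
	struct clk *ptp_clk = stmmac_pltfr_find_clk(plat_dat, "ptp_ref");

	/* NULL is a soft failure: the platform has no clock by that name */
	if (!ptp_clk)
		return 0;

	return clk_set_rate(ptp_clk, 200000000);
}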
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 72dc1a32e46d..6e6561e29d6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -14,6 +14,9 @@
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
+struct clk *stmmac_pltfr_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name);
+
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
new file mode 100644
index 000000000000..0b6f6228ae35
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN (802.1Q) handling
+ */
+
+#include "stmmac.h"
+#include "stmmac_vlan.h"
+
+static void vlan_write_single(struct net_device *dev, u16 vid)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 val;
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~VLAN_TAG_VID;
+ val |= VLAN_TAG_ETV | vid;
+
+ writel(val, ioaddr + VLAN_TAG);
+}
+
+static int vlan_write_filter(struct net_device *dev,
+ struct mac_device_info *hw,
+ u8 index, u32 data)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int ret;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+ return -EINVAL;
+
+ writel(data, ioaddr + VLAN_TAG_DATA);
+
+ val = readl(ioaddr + VLAN_TAG);
+ val &= ~(VLAN_TAG_CTRL_OFS_MASK |
+ VLAN_TAG_CTRL_CT |
+ VLAN_TAG_CTRL_OB);
+ val |= (index << VLAN_TAG_CTRL_OFS_SHIFT) | VLAN_TAG_CTRL_OB;
+
+ writel(val, ioaddr + VLAN_TAG);
+
+ ret = readl_poll_timeout(ioaddr + VLAN_TAG, val,
+ !(val & VLAN_TAG_CTRL_OB),
+ 1000, 500000);
+ if (ret) {
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int index = -1;
+ u32 val = 0;
+ int i, ret;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ /* For single VLAN filter, VID 0 means VLAN promiscuous */
+ if (vid == 0) {
+ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
+ return -EPERM;
+ }
+
+ if (hw->vlan_filter[0] & VLAN_TAG_VID) {
+ netdev_err(dev, "Only single VLAN ID supported\n");
+ return -EPERM;
+ }
+
+ hw->vlan_filter[0] = vid;
+ vlan_write_single(dev, vid);
+
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ val |= VLAN_TAG_DATA_ETV | VLAN_TAG_DATA_VEN | vid;
+
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] == val)
+ return 0;
+ else if (!(hw->vlan_filter[i] & VLAN_TAG_DATA_VEN))
+ index = i;
+ }
+
+ if (index == -1) {
+ netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
+ hw->num_vlan);
+ return -EPERM;
+ }
+
+ ret = vlan_write_filter(dev, hw, index, val);
+
+ if (!ret)
+ hw->vlan_filter[index] = val;
+
+ return ret;
+}
+
+static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int i, ret = 0;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+ vlan_write_single(dev, 0);
+ }
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid) {
+ ret = vlan_write_filter(dev, hw, i, 0);
+
+ if (!ret)
+ hw->vlan_filter[i] = 0;
+ else
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ vlan_write_single(dev, hw->vlan_filter[0]);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i];
+ vlan_write_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + VLAN_HASH_TABLE);
+ if (hash & VLAN_VLHT) {
+ value = readl(ioaddr + VLAN_TAG);
+ value |= VLAN_VTHM;
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ if (hash) {
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = VLAN_ETV;
+
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+static void vlan_enable(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + VLAN_INCL);
+ value |= VLAN_VLTI;
+ value |= VLAN_CSVL; /* Only use SVLAN */
+ value &= ~VLAN_VLC;
+ value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
+ writel(value, ioaddr + VLAN_INCL);
+}
+
+static void vlan_rx_hw(struct mac_device_info *hw,
+ struct dma_desc *rx_desc, struct sk_buff *skb)
+{
+ if (hw->desc->get_rx_vlan_valid(rx_desc)) {
+ u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+static void vlan_set_hw_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_TAG_CTRL_EVLS_MASK;
+
+ if (hw->hw_vlan_en)
+ /* Always strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_ALL;
+ else
+ /* Do not strip VLAN on Receive */
+ value |= VLAN_TAG_STRIP_NONE;
+
+ /* Enable outer VLAN Tag in Rx DMA descriptor */
+ value |= VLAN_TAG_CTRL_EVLRXS;
+ writel(value, ioaddr + VLAN_TAG);
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ u16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value |= VLAN_VTHM | VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value, ioaddr + VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~VLAN_VTHM;
+ value |= VLAN_ETV;
+ if (is_double) {
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
+ } else {
+ value &= ~VLAN_EDVLP;
+ value &= ~VLAN_ESVL;
+ value &= ~VLAN_DOVLTC;
+ }
+
+ value &= ~VLAN_VID;
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + VLAN_TAG);
+
+ value &= ~(VLAN_VTHM | VLAN_ETV);
+ value &= ~(VLAN_EDVLP | VLAN_ESVL);
+ value &= ~VLAN_DOVLTC;
+ value &= ~VLAN_VID;
+
+ writel(value, ioaddr + VLAN_TAG);
+ }
+}
+
+const struct stmmac_vlan_ops dwmac_vlan_ops = {
+ .update_vlan_hash = vlan_update_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+const struct stmmac_vlan_ops dwxlgmac2_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+};
+
+const struct stmmac_vlan_ops dwxgmac210_vlan_ops = {
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .enable_vlan = vlan_enable,
+ .add_hw_vlan_rx_fltr = vlan_add_hw_rx_fltr,
+ .del_hw_vlan_rx_fltr = vlan_del_hw_rx_fltr,
+ .restore_hw_vlan_rx_fltr = vlan_restore_hw_rx_fltr,
+ .rx_hw_vlan = vlan_rx_hw,
+ .set_hw_vlan_mode = vlan_set_hw_mode,
+};
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr)
+{
+ u32 val, num_vlan;
+
+ val = readl(ioaddr + HW_FEATURE3);
+ switch (val & VLAN_HW_FEAT_NRVF) {
+ case 0:
+ num_vlan = 1;
+ break;
+ case 1:
+ num_vlan = 4;
+ break;
+ case 2:
+ num_vlan = 8;
+ break;
+ case 3:
+ num_vlan = 16;
+ break;
+ case 4:
+ num_vlan = 24;
+ break;
+ case 5:
+ num_vlan = 32;
+ break;
+ default:
+ num_vlan = 1;
+ }
+
+ return num_vlan;
+}
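The NRVF field is effectively an index into {1, 4, 8, 16, 24, 32}, so the switch above could equally be a lookup table; a behavior-equivalent sketch (helper name hypothetical):

static u32 sketch_get_num_vlan(void __iomem *ioaddr)
{
	static const u8 nrvf_to_filters[] = { 1, 4, 8, 16, 24, 32 };
	u32 nrvf = readl(ioaddr + HW_FEATURE3) & VLAN_HW_FEAT_NRVF;

	return nrvf < ARRAY_SIZE(nrvf_to_filters) ? nrvf_to_filters[nrvf] : 1;
}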
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
new file mode 100644
index 000000000000..c24f89a9049b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025, Altera Corporation
+ * stmmac VLAN (802.1Q) handling
+ */
+
+#ifndef __STMMAC_VLAN_H__
+#define __STMMAC_VLAN_H__
+
+#include <linux/bitfield.h>
+#include "dwxgmac2.h"
+
+#define VLAN_TAG 0x00000050
+#define VLAN_TAG_DATA 0x00000054
+#define VLAN_HASH_TABLE 0x00000058
+#define VLAN_INCL 0x00000060
+
+/* MAC VLAN */
+#define VLAN_EDVLP BIT(26)
+#define VLAN_VTHM BIT(25)
+#define VLAN_DOVLTC BIT(20)
+#define VLAN_ESVL BIT(18)
+#define VLAN_ETV BIT(16)
+#define VLAN_VID GENMASK(15, 0)
+#define VLAN_VLTI BIT(20)
+#define VLAN_CSVL BIT(19)
+#define VLAN_VLC GENMASK(17, 16)
+#define VLAN_VLC_SHIFT 16
+#define VLAN_VLHT GENMASK(15, 0)
+
+/* MAC VLAN Tag */
+#define VLAN_TAG_VID GENMASK(15, 0)
+#define VLAN_TAG_ETV BIT(16)
+
+/* MAC VLAN Tag Control */
+#define VLAN_TAG_CTRL_OB BIT(0)
+#define VLAN_TAG_CTRL_CT BIT(1)
+#define VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
+#define VLAN_TAG_CTRL_OFS_SHIFT 2
+#define VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
+#define VLAN_TAG_CTRL_EVLS_SHIFT 21
+#define VLAN_TAG_CTRL_EVLRXS BIT(24)
+
+#define VLAN_TAG_STRIP_NONE FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x0)
+#define VLAN_TAG_STRIP_PASS FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x1)
+#define VLAN_TAG_STRIP_FAIL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x2)
+#define VLAN_TAG_STRIP_ALL FIELD_PREP(VLAN_TAG_CTRL_EVLS_MASK, 0x3)
+
+/* MAC VLAN Tag Data/Filter */
+#define VLAN_TAG_DATA_VID GENMASK(15, 0)
+#define VLAN_TAG_DATA_VEN BIT(16)
+#define VLAN_TAG_DATA_ETV BIT(17)
+
+/* MAC VLAN HW FEAT */
+#define HW_FEATURE3 0x00000128
+#define VLAN_HW_FEAT_NRVF GENMASK(2, 0)
+
+extern const struct stmmac_vlan_ops dwmac_vlan_ops;
+extern const struct stmmac_vlan_ops dwxgmac210_vlan_ops;
+extern const struct stmmac_vlan_ops dwxlgmac2_vlan_ops;
+
+u32 stmmac_get_num_vlan(void __iomem *ioaddr);
+
+#endif /* __STMMAC_VLAN_H__ */
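The EVLS field selects the outer-tag stripping policy, which the four VLAN_TAG_STRIP_* helpers spell out: 0 never strips, 1 strips on filter match, 2 strips on filter fail, 3 always strips. A sketch of selecting a policy at runtime, assuming the same read-modify-write pattern as vlan_set_hw_mode() (function name hypothetical):

static void sketch_set_strip_policy(void __iomem *ioaddr, bool strip_on_match)
{
	u32 val = readl(ioaddr + VLAN_TAG);

	val &= ~VLAN_TAG_CTRL_EVLS_MASK;
	val |= strip_on_match ? VLAN_TAG_STRIP_PASS : VLAN_TAG_STRIP_NONE;
	writel(val, ioaddr + VLAN_TAG);
}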
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 1e6d2335293d..f20d1ff192ef 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -427,7 +427,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
if (netif_tx_queue_stopped(netif_txq)) {
/* try recover if stopped by us */
- txq_trans_update(netif_txq);
+ txq_trans_update(ndev, netif_txq);
netif_tx_wake_queue(netif_txq);
}
}
@@ -2679,13 +2679,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
goto of_node_put;
ret = of_get_mac_address(port_np, port->slave.mac_addr);
- if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ goto of_node_put;
+ } else if (ret) {
am65_cpsw_am654_get_efuse_macid(port_np,
port->port_id,
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
eth_random_addr(port->slave.mac_addr);
- dev_err(dev, "Use random MAC address\n");
+ dev_info(dev, "Use random MAC address\n");
}
}
@@ -2760,7 +2762,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
mutex_init(&ndev_priv->mm_lock);
port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
- port->ndev->dev.of_node = port->slave.port_np;
+ device_set_node(&port->ndev->dev, of_fwnode_handle(port->slave.port_np));
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
@@ -3561,6 +3563,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_nuss_get_ver(common);
+
+ ret = am65_cpsw_nuss_init_host_p(common);
+ if (ret)
+ goto err_pm_clear;
+
+ ret = am65_cpsw_nuss_init_slave_ports(common);
+ if (ret)
+ goto err_pm_clear;
+
node = of_get_child_by_name(dev->of_node, "mdio");
if (!node) {
dev_warn(dev, "MDIO node not found\n");
@@ -3577,16 +3589,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
}
of_node_put(node);
- am65_cpsw_nuss_get_ver(common);
-
- ret = am65_cpsw_nuss_init_host_p(common);
- if (ret)
- goto err_of_clear;
-
- ret = am65_cpsw_nuss_init_slave_ports(common);
- if (ret)
- goto err_of_clear;
-
/* init common data */
ale_params.dev = dev;
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
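The MAC-address change above turns -EPROBE_DEFER from of_get_mac_address() (typically an nvmem cell whose provider has not probed yet) into a probe retry instead of silently falling through to the efuse or a random address. The resulting selection order, as a hedged standalone sketch:

static int sketch_get_mac(struct device *dev, struct device_node *np,
			  u8 *addr)
{
	int ret = of_get_mac_address(np, addr);

	if (ret == -EPROBE_DEFER)
		return ret;	/* nvmem provider not ready: retry probe */
	if (ret) {
		/* e.g. read the SoC efuse here; random MAC as last resort */
		eth_random_addr(addr);
		dev_info(dev, "Use random MAC address\n");
	}
	return 0;
}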
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a984b7d84e5e..54c24cd3d3be 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1156,6 +1156,27 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
}
#endif
+/* We need a custom implementation of phy_do_ioctl_running() because in switch
+ * mode, dev->phydev may be different than the phy of the active_slave. We need
+ * to operate on the locally saved phy instead.
+ */
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ phy = cpsw->slaves[slave_no].phy;
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -1174,6 +1195,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_setup_tc = cpsw_ndo_setup_tc,
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
@@ -1646,6 +1669,9 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT;
+ /* Hijack PHY timestamping requests in order to block them */
+ if (!cpsw->data.dual_emac)
+ ndev->see_all_hwtstamp_requests = true;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
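Setting see_all_hwtstamp_requests means requests the core would otherwise route straight to a PHY timestamper are delivered to the MAC driver first, with cfg->source identifying the intended consumer. That is what lets cpsw_hwtstamp_set() (in cpsw_priv.c further below) veto PHY timestamping in switch mode with a single check; shown in isolation, with cfg and extack assumed in scope:

	/* In .ndo_hwtstamp_set: only MAC-sourced requests are honoured */
	if (cfg->source != HWTSTAMP_SOURCE_NETDEV) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Switch mode only supports MAC timestamping");
		return -EOPNOTSUPP;
	}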
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 5b5b52e4e7a7..8b9e2078c602 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1132,7 +1132,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_set_mac_address = cpsw_ndo_set_mac_address,
- .ndo_eth_ioctl = cpsw_ndo_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = cpsw_ndo_tx_timeout,
.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
@@ -1147,6 +1147,8 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_bpf = cpsw_ndo_bpf,
.ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
.ndo_get_port_parent_id = cpsw_get_port_parent_id,
+ .ndo_hwtstamp_get = cpsw_hwtstamp_get,
+ .ndo_hwtstamp_set = cpsw_hwtstamp_set,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 6fe4edabba44..bc4fdf17a99e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -614,24 +614,29 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
- struct hwtstamp_config cfg;
+
+ /* This will only execute if dev->see_all_hwtstamp_requests is set */
+ if (cfg->source != HWTSTAMP_SOURCE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Switch mode only supports MAC timestamping");
+ return -EOPNOTSUPP;
+ }
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->rx_ts_enabled = 0;
break;
@@ -651,13 +656,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+ priv->tx_ts_enabled = cfg->tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -671,65 +676,40 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
WARN_ON(1);
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
struct cpsw_priv *priv = netdev_priv(dev);
- struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
cpsw->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
- cfg.flags = 0;
- cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = priv->rx_ts_enabled;
+ cfg->tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg->rx_filter = priv->rx_ts_enabled;
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
#else
-static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
{
return -EOPNOTSUPP;
}
-static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- struct cpsw_priv *priv = netdev_priv(dev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
- struct phy_device *phy;
-
- if (!netif_running(dev))
- return -EINVAL;
-
- phy = cpsw->slaves[slave_no].phy;
-
- if (!phy_has_hwtstamp(phy)) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
- }
- }
-
- if (phy)
- return phy_mii_ioctl(phy, req, cmd);
-
- return -EOPNOTSUPP;
-}
-
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
struct cpsw_priv *priv = netdev_priv(ndev);
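The conversion above removes every copy_from_user()/copy_to_user() from the driver: with ndo_hwtstamp_get/set, the core copies struct hwtstamp_config to and from userspace and hands the driver a kernel-resident struct kernel_hwtstamp_config, plus an extack for structured error reporting. A minimal sketch of the callback shape (the foo_* names and fields are hypothetical, not from this driver):

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

struct foo_priv {
	bool tx_ts_on;
	int rx_filter;
};

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* no copy_to_user(): the core copies cfg back to the caller */
	cfg->tx_type = priv->tx_ts_on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg->rx_filter = priv->rx_filter;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported tx_type");
		return -ERANGE;
	}
	/* program the hardware, then report the effective rx_filter back
	 * through cfg, exactly as cpsw_hwtstamp_set() does above
	 */
	return 0;
}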
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index f2fc55d9295d..91add8925e23 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -461,7 +461,6 @@ void soft_reset(const char *module, void __iomem *reg);
void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
int cpsw_need_resplit(struct cpsw_common *cpsw);
-int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
@@ -469,6 +468,11 @@ bool cpsw_shp_is_off(struct cpsw_priv *priv);
void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
+int cpsw_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int cpsw_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
/* ethtool */
u32 cpsw_get_msglevel(struct net_device *ndev);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index d88a0180294e..5b8fdb882172 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -1329,10 +1329,28 @@ void icssg_ndo_get_stats64(struct net_device *ndev,
stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
- stats->rx_errors = ndev->stats.rx_errors;
- stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->rx_errors = ndev->stats.rx_errors +
+ emac_get_stat_by_name(emac, "FW_RX_ERROR") +
+ emac_get_stat_by_name(emac, "FW_RX_EOF_SHORT_FRMERR") +
+ emac_get_stat_by_name(emac, "FW_RX_B0_DROP_EARLY_EOF") +
+ emac_get_stat_by_name(emac, "FW_RX_EXP_FRAG_Q_DROP") +
+ emac_get_stat_by_name(emac, "FW_RX_FIFO_OVERRUN");
+ stats->rx_dropped = ndev->stats.rx_dropped +
+ emac_get_stat_by_name(emac, "FW_DROPPED_PKT") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_DISABLED") +
+ emac_get_stat_by_name(emac, "FW_INF_SAV") +
+ emac_get_stat_by_name(emac, "FW_INF_SA_DL") +
+ emac_get_stat_by_name(emac, "FW_INF_PORT_BLOCKED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_TAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_PRIOTAGGED") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTAG") +
+ emac_get_stat_by_name(emac, "FW_INF_DROP_NOTMEMBER");
stats->tx_errors = ndev->stats.tx_errors;
- stats->tx_dropped = ndev->stats.tx_dropped;
+ stats->tx_dropped = ndev->stats.tx_dropped +
+ emac_get_stat_by_name(emac, "FW_RTU_PKT_DROP") +
+ emac_get_stat_by_name(emac, "FW_TX_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_TS_DROPPED_PACKET") +
+ emac_get_stat_by_name(emac, "FW_TX_JUMBO_FRM_CUTOFF");
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index b6be4aa57a61..23c465f1ce7f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -54,7 +54,7 @@
#define ICSSG_MAX_RFLOWS 8 /* per slice */
-#define ICSSG_NUM_PA_STATS 4
+#define ICSSG_NUM_PA_STATS 32
#define ICSSG_NUM_MIIG_STATS 60
/* Number of ICSSG related stats */
#define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 6f0edae38ea2..e8241e998aa9 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -11,7 +11,6 @@
#define ICSSG_TX_PACKET_OFFSET 0xA0
#define ICSSG_TX_BYTE_OFFSET 0xEC
-#define ICSSG_FW_STATS_BASE 0x0248
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
@@ -46,9 +45,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
if (prueth->pa_stats) {
for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
- reg = ICSSG_FW_STATS_BASE +
- icssg_all_pa_stats[i].offset *
- PRUETH_NUM_MACS + slice * sizeof(u32);
+ reg = icssg_all_pa_stats[i].offset +
+ slice * sizeof(u32);
regmap_read(prueth->pa_stats, reg, &val);
emac->pa_stats[i] += val;
}
@@ -80,7 +78,7 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
if (emac->prueth->pa_stats) {
for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
- return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
+ return emac->pa_stats[i];
}
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h
index e88b919f532c..5ec0b38e0c67 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h
@@ -155,24 +155,10 @@ static const struct icssg_miig_stats icssg_all_miig_stats[] = {
ICSSG_MIIG_STATS(tx_bytes, true),
};
-/**
- * struct pa_stats_regs - ICSSG Firmware maintained PA Stats register
- * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI
- * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues
- * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter
- * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter
- */
-struct pa_stats_regs {
- u32 fw_rx_cnt;
- u32 fw_tx_cnt;
- u32 fw_tx_pre_overflow;
- u32 fw_tx_exp_overflow;
-};
-
-#define ICSSG_PA_STATS(field) \
-{ \
- #field, \
- offsetof(struct pa_stats_regs, field), \
+#define ICSSG_PA_STATS(field) \
+{ \
+ #field, \
+ field, \
}
struct icssg_pa_stats {
@@ -181,10 +167,38 @@ struct icssg_pa_stats {
};
static const struct icssg_pa_stats icssg_all_pa_stats[] = {
- ICSSG_PA_STATS(fw_rx_cnt),
- ICSSG_PA_STATS(fw_tx_cnt),
- ICSSG_PA_STATS(fw_tx_pre_overflow),
- ICSSG_PA_STATS(fw_tx_exp_overflow),
+ ICSSG_PA_STATS(FW_RTU_PKT_DROP),
+ ICSSG_PA_STATS(FW_Q0_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q1_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q2_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q3_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q4_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q5_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q6_OVERFLOW),
+ ICSSG_PA_STATS(FW_Q7_OVERFLOW),
+ ICSSG_PA_STATS(FW_DROPPED_PKT),
+ ICSSG_PA_STATS(FW_RX_ERROR),
+ ICSSG_PA_STATS(FW_RX_DS_INVALID),
+ ICSSG_PA_STATS(FW_TX_DROPPED_PACKET),
+ ICSSG_PA_STATS(FW_TX_TS_DROPPED_PACKET),
+ ICSSG_PA_STATS(FW_INF_PORT_DISABLED),
+ ICSSG_PA_STATS(FW_INF_SAV),
+ ICSSG_PA_STATS(FW_INF_SA_DL),
+ ICSSG_PA_STATS(FW_INF_PORT_BLOCKED),
+ ICSSG_PA_STATS(FW_INF_DROP_TAGGED),
+ ICSSG_PA_STATS(FW_INF_DROP_PRIOTAGGED),
+ ICSSG_PA_STATS(FW_INF_DROP_NOTAG),
+ ICSSG_PA_STATS(FW_INF_DROP_NOTMEMBER),
+ ICSSG_PA_STATS(FW_RX_EOF_SHORT_FRMERR),
+ ICSSG_PA_STATS(FW_RX_B0_DROP_EARLY_EOF),
+ ICSSG_PA_STATS(FW_TX_JUMBO_FRM_CUTOFF),
+ ICSSG_PA_STATS(FW_RX_EXP_FRAG_Q_DROP),
+ ICSSG_PA_STATS(FW_RX_FIFO_OVERRUN),
+ ICSSG_PA_STATS(FW_CUT_THR_PKT),
+ ICSSG_PA_STATS(FW_HOST_RX_PKT_CNT),
+ ICSSG_PA_STATS(FW_HOST_TX_PKT_CNT),
+ ICSSG_PA_STATS(FW_HOST_EGRESS_Q_PRE_OVERFLOW),
+ ICSSG_PA_STATS(FW_HOST_EGRESS_Q_EXP_OVERFLOW),
};
#endif /* __NET_TI_ICSSG_STATS_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
index 424a7e945ea8..490a9cc06fb0 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
@@ -231,4 +231,37 @@
/* Start of 32 bits PA_STAT counters */
#define PA_STAT_32b_START_OFFSET 0x0080
+#define FW_RTU_PKT_DROP 0x0088
+#define FW_Q0_OVERFLOW 0x0090
+#define FW_Q1_OVERFLOW 0x0098
+#define FW_Q2_OVERFLOW 0x00A0
+#define FW_Q3_OVERFLOW 0x00A8
+#define FW_Q4_OVERFLOW 0x00B0
+#define FW_Q5_OVERFLOW 0x00B8
+#define FW_Q6_OVERFLOW 0x00C0
+#define FW_Q7_OVERFLOW 0x00C8
+#define FW_DROPPED_PKT 0x00F8
+#define FW_RX_ERROR 0x0100
+#define FW_RX_DS_INVALID 0x0108
+#define FW_TX_DROPPED_PACKET 0x0110
+#define FW_TX_TS_DROPPED_PACKET 0x0118
+#define FW_INF_PORT_DISABLED 0x0120
+#define FW_INF_SAV 0x0128
+#define FW_INF_SA_DL 0x0130
+#define FW_INF_PORT_BLOCKED 0x0138
+#define FW_INF_DROP_TAGGED 0x0140
+#define FW_INF_DROP_PRIOTAGGED 0x0148
+#define FW_INF_DROP_NOTAG 0x0150
+#define FW_INF_DROP_NOTMEMBER 0x0158
+#define FW_RX_EOF_SHORT_FRMERR 0x0188
+#define FW_RX_B0_DROP_EARLY_EOF 0x0190
+#define FW_TX_JUMBO_FRM_CUTOFF 0x0198
+#define FW_RX_EXP_FRAG_Q_DROP 0x01A0
+#define FW_RX_FIFO_OVERRUN 0x01A8
+#define FW_CUT_THR_PKT 0x01B0
+#define FW_HOST_RX_PKT_CNT 0x0248
+#define FW_HOST_TX_PKT_CNT 0x0250
+#define FW_HOST_EGRESS_Q_PRE_OVERFLOW 0x0258
+#define FW_HOST_EGRESS_Q_EXP_OVERFLOW 0x0260
+
#endif /* __NET_TI_ICSSG_SWITCH_MAP_H */
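Folding ICSSG_FW_STATS_BASE into per-counter absolute offsets reduces per-slice addressing to a single addition, since the slice 0 and slice 1 copies of each counter occupy adjacent 32-bit words (note the 8-byte stride between the defines above). A sketch of the resulting access pattern (the helper name is hypothetical; the regmap is prueth->pa_stats from the patch):

#include <linux/regmap.h>

/* read one firmware-maintained PA counter for a given slice (0 or 1) */
static u32 icssg_read_pa_stat(struct regmap *pa_stats, int slice, u32 offset)
{
	u32 val = 0;

	/* offset is one of the FW_* absolute offsets defined above */
	regmap_read(pa_stats, offset + slice * sizeof(u32), &val);
	return val;
}

/* e.g. icssg_read_pa_stat(prueth->pa_stats, slice, FW_RX_ERROR) */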
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index e4d993f31374..a75bca1243e3 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -8,6 +8,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -16,6 +17,7 @@
#include <linux/cache.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/spi/spi.h>
#include <linux/of_net.h>
@@ -45,7 +47,6 @@
struct mse102x_stats {
u64 xfer_err;
- u64 invalid_cmd;
u64 invalid_ctr;
u64 invalid_dft;
u64 invalid_len;
@@ -56,7 +57,6 @@ struct mse102x_stats {
static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
"SPI transfer errors",
- "Invalid command",
"Invalid CTR",
"Invalid DFT",
"Invalid frame length",
@@ -85,6 +85,8 @@ struct mse102x_net_spi {
struct spi_message spi_msg;
struct spi_transfer spi_xfer;
+ bool valid_cmd_received;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
#endif
@@ -98,16 +100,18 @@ static int mse102x_info_show(struct seq_file *s, void *what)
{
struct mse102x_net_spi *mses = s->private;
- seq_printf(s, "TX ring size : %u\n",
+ seq_printf(s, "TX ring size : %u\n",
skb_queue_len(&mses->mse102x.txq));
- seq_printf(s, "IRQ : %d\n",
+ seq_printf(s, "IRQ : %d\n",
mses->spidev->irq);
- seq_printf(s, "SPI effective speed : %lu\n",
+ seq_printf(s, "SPI effective speed : %lu\n",
(unsigned long)mses->spi_xfer.effective_speed_hz);
- seq_printf(s, "SPI mode : %x\n",
+ seq_printf(s, "SPI mode : %x\n",
mses->spidev->mode);
+ seq_printf(s, "Received valid CMD once : %s\n",
+ str_yes_no(mses->valid_cmd_received));
return 0;
}
@@ -194,10 +198,10 @@ static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
} else if (*cmd != cpu_to_be16(DET_CMD)) {
net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
__func__, *cmd);
- mse->stats.invalid_cmd++;
ret = -EIO;
} else {
memcpy(rxb, trx + 2, 2);
+ mses->valid_cmd_received = true;
}
return ret;
@@ -306,7 +310,7 @@ static void mse102x_dump_packet(const char *msg, int len, const char *data)
data, len, true);
}
-static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+static irqreturn_t mse102x_rx_pkt_spi(struct mse102x_net *mse)
{
struct sk_buff *skb;
unsigned int rxalign;
@@ -315,31 +319,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
__be16 rx = 0;
u16 cmd_resp;
u8 *rxpkt;
- int ret;
mse102x_tx_cmd_spi(mse, CMD_CTR);
- ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
- cmd_resp = be16_to_cpu(rx);
-
- if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
+ if (mse102x_rx_cmd_spi(mse, (u8 *)&rx)) {
usleep_range(50, 100);
+ return IRQ_NONE;
+ }
- mse102x_tx_cmd_spi(mse, CMD_CTR);
- ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
- if (ret)
- return;
-
- cmd_resp = be16_to_cpu(rx);
- if ((cmd_resp & CMD_MASK) != CMD_RTS) {
- net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
- __func__, cmd_resp);
- mse->stats.invalid_rts++;
- drop = true;
- goto drop;
- }
-
- net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
- __func__);
+ cmd_resp = be16_to_cpu(rx);
+ if ((cmd_resp & CMD_MASK) != CMD_RTS) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_rts++;
+ drop = true;
+ goto drop;
}
rxlen = cmd_resp & LEN_MASK;
@@ -360,7 +353,7 @@ drop:
rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
if (!skb)
- return;
+ return IRQ_NONE;
/* 2 bytes Start of frame (before ethernet header)
* 2 bytes Data frame tail (after ethernet frame)
@@ -370,7 +363,7 @@ drop:
if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
mse->ndev->stats.rx_errors++;
dev_kfree_skb(skb);
- return;
+ return IRQ_HANDLED;
}
if (netif_msg_pktdata(mse))
@@ -381,6 +374,8 @@ drop:
mse->ndev->stats.rx_packets++;
mse->ndev->stats.rx_bytes += rxlen;
+
+ return IRQ_HANDLED;
}
static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
@@ -512,20 +507,36 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
{
struct mse102x_net *mse = _mse;
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ irqreturn_t ret;
mutex_lock(&mses->lock);
- mse102x_rx_pkt_spi(mse);
+ ret = mse102x_rx_pkt_spi(mse);
mutex_unlock(&mses->lock);
- return IRQ_HANDLED;
+ return ret;
}
static int mse102x_net_open(struct net_device *ndev)
{
+ struct irq_data *irq_data = irq_get_irq_data(ndev->irq);
struct mse102x_net *mse = netdev_priv(ndev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
int ret;
+ if (!irq_data) {
+ netdev_err(ndev, "Invalid IRQ: %d\n", ndev->irq);
+ return -EINVAL;
+ }
+
+ switch (irqd_get_trigger_type(irq_data)) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ break;
+ default:
+ netdev_warn_once(ndev, "Level-triggered IRQ recommended, please update your device tree firmware.\n");
+ break;
+ }
+
ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
ndev->name, mse);
if (ret < 0) {
@@ -543,7 +554,8 @@ static int mse102x_net_open(struct net_device *ndev)
* So poll for possible packet(s) to re-arm the interrupt.
*/
mutex_lock(&mses->lock);
- mse102x_rx_pkt_spi(mse);
+ if (mse102x_rx_pkt_spi(mse) == IRQ_NONE)
+ mse102x_rx_pkt_spi(mse);
mutex_unlock(&mses->lock);
netif_dbg(mse, ifup, ndev, "network device up\n");
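Returning IRQ_NONE from the threaded handler when no valid CMD response arrives lets the genirq core's spurious-interrupt accounting (and a level-triggered line) take care of re-delivery, replacing the hand-rolled second poll the old code carried; the open() path above only polls a second time when the first poll claimed nothing. A sketch of the contract, with hypothetical foo_* names:

#include <linux/interrupt.h>

struct foo_dev { int pending; };

static bool foo_fetch_work(struct foo_dev *fdev) { return fdev->pending; }
static void foo_process(struct foo_dev *fdev) { fdev->pending = 0; }

static irqreturn_t foo_irq_thread(int irq, void *data)
{
	struct foo_dev *fdev = data;

	if (!foo_fetch_work(fdev))
		return IRQ_NONE;	/* spurious: let the core account for it */
	foo_process(fdev);
	return IRQ_HANDLED;
}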
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 47e3e8434b9e..e5fc942c28cc 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -40,7 +40,7 @@ config NGBE
will be called ngbe.
config TXGBE
- tristate "Wangxun(R) 10GbE PCI Express adapters support"
+ tristate "Wangxun(R) 10/25/40GbE PCI Express adapters support"
depends on PCI
depends on COMMON_CLK
depends on I2C_DESIGNWARE_PLATFORM
@@ -55,7 +55,7 @@ config TXGBE
select PCS_XPCS
select LIBWX
help
- This driver supports Wangxun(R) 10GbE PCI Express family of
+ This driver supports Wangxun(R) 10/25/40GbE PCI Express family of
adapters.
More specific information on configuring the driver is in
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index e9f0f1f2309b..9b78b604a94e 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index 43019ec9329c..c12a4cb951f6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -219,7 +219,7 @@ int wx_nway_reset(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
return -EOPNOTSUPP;
return phylink_ethtool_nway_reset(wx->phylink);
@@ -231,9 +231,6 @@ int wx_get_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
- return -EOPNOTSUPP;
-
return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);
@@ -243,7 +240,7 @@ int wx_set_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
return -EOPNOTSUPP;
return phylink_ethtool_ksettings_set(wx->phylink, cmd);
@@ -255,7 +252,7 @@ void wx_get_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
return;
phylink_ethtool_get_pauseparam(wx->phylink, pause);
@@ -267,7 +264,7 @@ int wx_set_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
return -EOPNOTSUPP;
return phylink_ethtool_set_pauseparam(wx->phylink, pause);
@@ -345,6 +342,7 @@ int wx_set_coalesce(struct net_device *netdev,
max_eitr = WX_SP_MAX_EITR;
break;
case wx_mac_aml:
+ case wx_mac_aml40:
max_eitr = WX_AML_MAX_EITR;
break;
default:
@@ -375,6 +373,7 @@ int wx_set_coalesce(struct net_device *netdev,
switch (wx->mac.type) {
case wx_mac_sp:
case wx_mac_aml:
+ case wx_mac_aml40:
tx_itr_param = WX_12K_ITR;
break;
default:
@@ -413,15 +412,10 @@ static unsigned int wx_max_channels(struct wx *wx)
max_combined = 1;
} else {
/* support up to max allowed queues with RSS */
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
max_combined = 63;
- break;
- default:
+ else
max_combined = 8;
- break;
- }
}
return max_combined;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index aed45abafb1b..0f4be72116b8 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -10,6 +10,7 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_sriov.h"
#include "wx_hw.h"
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
@@ -112,15 +113,10 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMS(1), mask);
- break;
- default:
- break;
}
}
@@ -132,15 +128,10 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMC(1), mask);
- break;
- default:
- break;
}
}
EXPORT_SYMBOL(wx_intr_enable);
@@ -434,14 +425,20 @@ static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);
/* polling reply from FW */
- err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 1000, 50000,
- true, wx, buffer, send_cmd);
+ err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000,
+ timeout * 1000, true, wx, buffer, send_cmd);
if (err) {
wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n",
send_cmd, wx->swfw_index);
goto rel_out;
}
+ if (hdr->cmd_or_resp.ret_status == 0x80) {
+ wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd);
+ err = -EINVAL;
+ goto rel_out;
+ }
+
/* expect no reply from FW then return */
if (!return_data)
goto rel_out;
@@ -698,6 +695,7 @@ void wx_init_eeprom_params(struct wx *wx)
switch (wx->mac.type) {
case wx_mac_sp:
case wx_mac_aml:
+ case wx_mac_aml40:
if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
wx_err(wx, "NVM Read Error\n");
return;
@@ -767,14 +765,8 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
/* setup VMDq pool mapping */
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
- break;
- default:
- break;
- }
/* HW expects these in little endian so we reverse the byte
* order from network order (big endian) to little endian
@@ -912,14 +904,9 @@ void wx_init_rx_addrs(struct wx *wx)
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
/* clear VMDq pool/queue selection for RAR 0 */
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
- break;
- default:
- break;
}
}
@@ -964,11 +951,28 @@ static void wx_sync_mac_table(struct wx *wx)
}
}
+static void wx_full_sync_mac_table(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ wx_set_rar(wx, i,
+ wx->mac_table[i].addr,
+ wx->mac_table[i].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+ } else {
+ wx_clear_rar(wx, i);
+ }
+ wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
+ }
+}
+
/* this function destroys the first RAR entry */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
- wx->mac_table[0].pools = 1ULL;
+ wx->mac_table[0].pools = BIT(VMDQ_P(0));
wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
wx_set_rar(wx, 0, wx->mac_table[0].addr,
wx->mac_table[0].pools,
@@ -993,7 +997,7 @@ void wx_flush_sw_mac_table(struct wx *wx)
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);
-static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -1024,7 +1028,7 @@ static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
return -ENOMEM;
}
-static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
u32 i;
@@ -1206,6 +1210,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
wx_dbg(wx, "Update mc addr list Complete\n");
}
+static void wx_restore_vf_multicasts(struct wx *wx)
+{
+ u32 i, j, vector_bit, vector_reg;
+ struct vf_data_storage *vfinfo;
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i));
+
+ vfinfo = &wx->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ wx->addr_ctrl.mta_in_use++;
+ vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[j]);
+ vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[j]);
+ wr32m(wx, WX_PSR_MC_TBL(vector_reg),
+ BIT(vector_bit), BIT(vector_bit));
+ /* errata 5: maintain a copy of the reg table conf */
+ wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
+ }
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(i), vmolr);
+ }
+
+ /* Restore any VF macvlans */
+ wx_full_sync_mac_table(wx);
+}
+
/**
* wx_write_mc_addr_list - write multicast addresses to MTA
* @netdev: network interface device structure
@@ -1223,6 +1256,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev)
wx_update_mc_addr_list(wx, netdev);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ wx_restore_vf_multicasts(wx);
+
return netdev_mc_count(netdev);
}
@@ -1243,7 +1279,7 @@ int wx_set_mac(struct net_device *netdev, void *p)
if (retval)
return retval;
- wx_del_mac_filter(wx, wx->mac.addr, 0);
+ wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0));
eth_hw_addr_set(netdev, addr->sa_data);
memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
@@ -1345,6 +1381,10 @@ static int wx_hpbthresh(struct wx *wx)
/* Calculate delay value for device */
dv_id = WX_DV(link, tc);
+ /* Loopback switch introduces additional latency */
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ dv_id += WX_B2BT(tc);
+
/* Delay value is calculated in bit times convert to KB */
kb = WX_BT2KB(dv_id);
rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
@@ -1400,12 +1440,107 @@ static void wx_pbthresh_setup(struct wx *wx)
wx->fc.low_water = 0;
}
+static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 pfvfspoof, reg_offset, vf_shift;
+
+ vf_shift = WX_VF_IND_SHIFT(vf);
+ reg_offset = WX_VF_REG_OFFSET(vf);
+
+ pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset));
+ if (enable)
+ pfvfspoof |= BIT(vf_shift);
+ else
+ pfvfspoof &= ~BIT(vf_shift);
+ wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof);
+}
+
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ struct wx *wx = netdev_priv(netdev);
+ u32 regval;
+
+ if (vf >= wx->num_vfs)
+ return -EINVAL;
+
+ wx->vfinfo[vf].spoofchk_enabled = setting;
+
+ regval = (setting << vf_bit);
+ wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval);
+
+ if (wx->vfinfo[vf].vlan_count)
+ wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval);
+
+ return 0;
+}
+
+static void wx_configure_virtualization(struct wx *wx)
+{
+ u16 pool = wx->num_rx_pools;
+ u32 reg_offset, vf_shift;
+ u32 i;
+
+ if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ return;
+
+ wr32m(wx, WX_PSR_VM_CTL,
+ WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN,
+ FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) |
+ WX_PSR_VM_CTL_REPLEN);
+ while (pool--)
+ wr32m(wx, WX_PSR_VM_L2CTL(pool),
+ WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE);
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ vf_shift = BIT(VMDQ_P(0));
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(0), vf_shift);
+ wr32(wx, WX_TDM_VF_TE(0), vf_shift);
+ } else {
+ vf_shift = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_offset = WX_VF_REG_OFFSET(VMDQ_P(0));
+
+ /* Enable only the PF pools for Tx/Rx */
+ wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1);
+ wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift));
+ wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1);
+ }
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (!wx->vfinfo[i].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, i, false);
+ /* enable ethertype anti spoofing if hw supports it */
+ wx_set_ethertype_anti_spoofing(wx, true, i);
+ }
+}
+
static void wx_configure_port(struct wx *wx)
{
u32 value, i;
- value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ value = (wx->num_vfs == 0) ?
+ WX_CFG_PORT_CTL_NUM_VT_NONE :
+ WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ if (wx->ring_feature[RING_F_RSS].indices == 4)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ } else {
+ value = 0;
+ }
+ }
+
+ value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK |
WX_CFG_PORT_CTL_D_VLAN |
WX_CFG_PORT_CTL_QINQ,
value);
@@ -1466,6 +1601,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable)
}
}
+static void wx_vlan_promisc_enable(struct wx *wx)
+{
+ u32 vlnctrl, i, vind, bits, reg_idx;
+
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) {
+ /* we need to keep the VLAN filter on in SRIOV */
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ } else {
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ return;
+ }
+ /* We are already in VLAN promisc, nothing to do */
+ if (test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ set_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+ /* Add PF to all active pools */
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vind = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits |= BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* Set all bits in the VLAN filter table array */
+ for (i = 0; i < wx->mac.vft_size; i++)
+ wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX);
+}
+
+static void wx_scrub_vfta(struct wx *wx)
+{
+ u32 i, vid, bits, vfta, vind, vlvf, reg_idx;
+
+ for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, i);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX);
+ /* pull VLAN ID from VLVF */
+ vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN;
+ if (vlvf & WX_PSR_VLAN_SWC_VIEN) {
+ /* if PF is part of this then continue */
+ if (test_bit(vid, wx->active_vlans))
+ continue;
+ }
+ /* remove PF from the pool */
+ vind = WX_VF_IND_SHIFT(VMDQ_P(0));
+ reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0));
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx));
+ bits &= ~BIT(vind);
+ wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits);
+ }
+ /* extract values from vft_shadow and write back to VFTA */
+ for (i = 0; i < wx->mac.vft_size; i++) {
+ vfta = wx->mac.vft_shadow[i];
+ wr32(wx, WX_PSR_VLAN_TBL(i), vfta);
+ }
+}
+
+static void wx_vlan_promisc_disable(struct wx *wx)
+{
+ u32 vlnctrl;
+
+ /* configure vlan filtering */
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ /* We are not in VLAN promisc, nothing to do */
+ if (!test_bit(WX_FLAG_VLAN_PROMISC, wx->flags))
+ return;
+ /* Set flag so we don't redo unnecessary work */
+ clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags);
+ wx_scrub_vfta(wx);
+}
+
void wx_set_rx_mode(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
@@ -1478,7 +1690,7 @@ void wx_set_rx_mode(struct net_device *netdev)
/* Check for Promiscuous and All Multicast modes */
fctrl = rd32(wx, WX_PSR_CTL);
fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
- vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)));
vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
WX_PSR_VM_L2CTL_MPE |
WX_PSR_VM_L2CTL_ROPE |
@@ -1499,7 +1711,10 @@ void wx_set_rx_mode(struct net_device *netdev)
fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
/* pf don't want packets routing to vf, so clear UPE */
vmolr |= WX_PSR_VM_L2CTL_MPE;
- vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
if (netdev->flags & IFF_ALLMULTI) {
@@ -1522,7 +1737,7 @@ void wx_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = wx_write_uc_addr_list(netdev, 0);
+ count = wx_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
vmolr |= WX_PSR_VM_L2CTL_UPE;
@@ -1540,7 +1755,7 @@ void wx_set_rx_mode(struct net_device *netdev)
wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
wr32(wx, WX_PSR_CTL, fctrl);
- wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
+ wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr);
if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
(features & NETIF_F_HW_VLAN_STAG_RX))
@@ -1548,6 +1763,10 @@ void wx_set_rx_mode(struct net_device *netdev)
else
wx_vlan_strip_control(wx, false);
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ wx_vlan_promisc_disable(wx);
+ else
+ wx_vlan_promisc_enable(wx);
}
EXPORT_SYMBOL(wx_set_rx_mode);
@@ -1797,6 +2016,13 @@ static void wx_setup_reta(struct wx *wx)
u32 random_key_size = WX_RSS_KEY_SIZE / 4;
u32 i, j;
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
+ if (wx->mac.type == wx_mac_em)
+ rss_i = 1;
+ else
+ rss_i = rss_i < 4 ? 4 : rss_i;
+ }
+
/* Fill out hash function seeds */
for (i = 0; i < random_key_size; i++)
wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
@@ -1814,10 +2040,42 @@ static void wx_setup_reta(struct wx *wx)
wx_store_reta(wx);
}
+#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1)
+#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2)
+static void wx_setup_psrtype(struct wx *wx)
+{
+ int rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 psrtype;
+ int pool;
+
+ psrtype = WX_RDB_PL_CFG_L4HDR |
+ WX_RDB_PL_CFG_L3HDR |
+ WX_RDB_PL_CFG_L2HDR |
+ WX_RDB_PL_CFG_TUN_OUTL2HDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR;
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ for_each_set_bit(pool, &wx->fwd_bitmask, 8)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ } else {
+ if (rss_i > 3)
+ psrtype |= WX_RDB_RSS_PL_4;
+ else if (rss_i > 1)
+ psrtype |= WX_RDB_RSS_PL_2;
+
+ for_each_set_bit(pool, &wx->fwd_bitmask, 32)
+ wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype);
+ }
+}
+
static void wx_setup_mrqc(struct wx *wx)
{
u32 rss_field = 0;
+ /* VT and RSS cannot be enabled at the same time */
+ if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return;
+
/* Disable indicating checksum in descriptor, enables RSS hash */
wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
@@ -1847,16 +2105,11 @@ static void wx_setup_mrqc(struct wx *wx)
**/
void wx_configure_rx(struct wx *wx)
{
- u32 psrtype, i;
int ret;
+ u32 i;
wx_disable_rx(wx);
-
- psrtype = WX_RDB_PL_CFG_L4HDR |
- WX_RDB_PL_CFG_L3HDR |
- WX_RDB_PL_CFG_L2HDR |
- WX_RDB_PL_CFG_TUN_TUNHDR;
- wr32(wx, WX_RDB_PL_CFG(0), psrtype);
+ wx_setup_psrtype(wx);
/* enable hw crc stripping */
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
@@ -1904,6 +2157,7 @@ void wx_configure(struct wx *wx)
{
wx_set_rxpba(wx);
wx_pbthresh_setup(wx);
+ wx_configure_virtualization(wx);
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
@@ -1998,10 +2252,8 @@ int wx_stop_adapter(struct wx *wx)
}
EXPORT_SYMBOL(wx_stop_adapter);
-void wx_reset_misc(struct wx *wx)
+void wx_reset_mac(struct wx *wx)
{
- int i;
-
/* receive packets that size > 2048 */
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
@@ -2013,6 +2265,14 @@ void wx_reset_misc(struct wx *wx)
WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+}
+EXPORT_SYMBOL(wx_reset_mac);
+
+void wx_reset_misc(struct wx *wx)
+{
+ int i;
+
+ wx_reset_mac(wx);
wr32m(wx, WX_MIS_RST_ST,
WX_MIS_RST_ST_RST_INIT, 0x1E00);
@@ -2262,7 +2522,7 @@ static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
{
u32 bitindex, vfta, targetbit;
bool vfta_changed = false;
@@ -2518,7 +2778,8 @@ void wx_update_stats(struct wx *wx)
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
- for (i = 0; i < wx->mac.max_rx_queues; i++)
+ for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
+ i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
}
EXPORT_SYMBOL(wx_update_stats);
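Every register index that used to be a literal 0 in this file (default pool, L2 control, unicast list, RAR 0 pools) now goes through VMDQ_P(0), the PF's default pool, so the same code works whether or not a VMDq offset is in effect. The macro itself is not shown in these hunks; a plausible definition, mirroring the ixgbe convention this scheme derives from (an assumption, the real wangxun definition may differ, and it expects a local wx in scope):

/* assumed shape of VMDQ_P(): maps a relative pool to its absolute index */
#define VMDQ_P(p)	((p) + wx->ring_feature[RING_F_VMDQ].offset)

/* with SR-IOV off the offset is 0 and VMDQ_P(0) degenerates to pool 0 */
vmolr = rd32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)));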
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index b883342bb576..26a56cba60b9 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -26,9 +26,12 @@ void wx_init_eeprom_params(struct wx *wx);
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
void wx_init_rx_addrs(struct wx *wx);
void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
+int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool);
+int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool);
void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
+int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int wx_disable_sec_rx_path(struct wx *wx);
void wx_enable_sec_rx_path(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
@@ -39,9 +42,11 @@ void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
int wx_disable_pcie_master(struct wx *wx);
int wx_stop_adapter(struct wx *wx);
+void wx_reset_mac(struct wx *wx);
void wx_reset_misc(struct wx *wx);
int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
+int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index e69eaa65e0de..5c747509d56b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -5,6 +5,7 @@
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/inet_ecn.h>
+#include <linux/workqueue.h>
#include <linux/iopoll.h>
#include <linux/sctp.h>
#include <linux/pci.h>
@@ -1620,6 +1621,65 @@ void wx_napi_disable_all(struct wx *wx)
}
EXPORT_SYMBOL(wx_napi_disable_all);
+static bool wx_set_vmdq_queues(struct wx *wx)
+{
+ u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit;
+ u16 rss_i = wx->ring_feature[RING_F_RSS].limit;
+ u16 rss_m = WX_RSS_DISABLED_MASK;
+ u16 vmdq_m = 0;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+ /* Add starting offset to total pool count */
+ vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 64, vmdq_i);
+
+ /* 64 pool mode with 2 queues per pool, or
+ * 16/32/64 pool mode with 1 queue per pool
+ */
+ if (vmdq_i > 32 || rss_i < 4) {
+ vmdq_m = WX_VMDQ_2Q_MASK;
+ rss_m = WX_RSS_2Q_MASK;
+ rss_i = min_t(u16, rss_i, 2);
+ /* 32 pool mode with 4 queues per pool */
+ } else {
+ vmdq_m = WX_VMDQ_4Q_MASK;
+ rss_m = WX_RSS_4Q_MASK;
+ rss_i = 4;
+ }
+ } else {
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, 8, vmdq_i);
+
+ /* when VMDQ on, disable RSS */
+ rss_i = 1;
+ }
+
+ /* remove the starting offset from the pool count */
+ vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset;
+
+ /* save features for later use */
+ wx->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ wx->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+ /* limit RSS based on user input and save for later use */
+ wx->ring_feature[RING_F_RSS].indices = rss_i;
+ wx->ring_feature[RING_F_RSS].mask = rss_m;
+
+ wx->queues_per_pool = rss_i; /* identical to num_rx_queues_per_pool below */
+ wx->num_rx_pools = vmdq_i;
+ wx->num_rx_queues_per_pool = rss_i;
+
+ wx->num_rx_queues = vmdq_i * rss_i;
+ wx->num_tx_queues = vmdq_i * rss_i;
+
+ return true;
+}
+
/**
* wx_set_rss_queues: Allocate queues for RSS
* @wx: board private structure to initialize
@@ -1634,6 +1694,10 @@ static void wx_set_rss_queues(struct wx *wx)
/* set mask for 16 queue limit of RSS */
f = &wx->ring_feature[RING_F_RSS];
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ f->mask = WX_RSS_64Q_MASK;
+ else
+ f->mask = WX_RSS_8Q_MASK;
f->indices = f->limit;
if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
@@ -1666,6 +1730,9 @@ static void wx_set_num_queues(struct wx *wx)
wx->num_tx_queues = 1;
wx->queues_per_pool = 1;
+ if (wx_set_vmdq_queues(wx))
+ return;
+
wx_set_rss_queues(wx);
}
@@ -1746,6 +1813,10 @@ static int wx_set_interrupt_capability(struct wx *wx)
if (ret == 0 || (ret == -ENOMEM))
return ret;
+ /* Disable VMDq support */
+ dev_warn(&wx->pdev->dev, "Disabling VMQQ support\n");
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+
/* Disable RSS */
dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
wx->ring_feature[RING_F_RSS].limit = 1;
@@ -1772,6 +1843,49 @@ static int wx_set_interrupt_capability(struct wx *wx)
return 0;
}
+static bool wx_cache_ring_vmdq(struct wx *wx)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS];
+ u16 reg_idx;
+ int i;
+
+ /* only proceed if VMDq is enabled */
+ if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
+ return false;
+
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->rx_ring[i]->reg_idx = reg_idx;
+ }
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & rss->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ wx->tx_ring[i]->reg_idx = reg_idx;
+ }
+ } else {
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* If we are greater than indices move to next pool */
+ wx->rx_ring[i]->reg_idx = reg_idx + i;
+
+ reg_idx = vmdq->offset;
+ for (i = 0; i < wx->num_tx_queues; i++)
+ /* If we are greater than indices move to next pool */
+ wx->tx_ring[i]->reg_idx = reg_idx + i;
+ }
+
+ return true;
+}
+
/**
* wx_cache_ring_rss - Descriptor ring to register mapping for RSS
* @wx: board private structure to initialize
@@ -1783,6 +1897,9 @@ static void wx_cache_ring_rss(struct wx *wx)
{
u16 i;
+ if (wx_cache_ring_vmdq(wx))
+ return;
+
for (i = 0; i < wx->num_rx_queues; i++)
wx->rx_ring[i]->reg_idx = i;
@@ -1843,6 +1960,7 @@ static int wx_alloc_q_vector(struct wx *wx,
switch (wx->mac.type) {
case wx_mac_sp:
case wx_mac_aml:
+ case wx_mac_aml40:
default_itr = WX_12K_ITR;
break;
default:
@@ -2181,7 +2299,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
- msix_vector += 1; /* offset for queue vectors */
+ if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7))
+ msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2210,6 +2329,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
itr_reg = q_vector->itr & WX_SP_MAX_EITR;
break;
case wx_mac_aml:
+ case wx_mac_aml40:
itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR;
break;
default:
@@ -2233,10 +2353,17 @@ void wx_configure_vectors(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
u32 eitrsel = 0;
- u16 v_idx;
+ u16 v_idx, i;
if (pdev->msix_enabled) {
/* Populate MSIX to EITR Select */
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ if (wx->num_vfs >= 32)
+ eitrsel = BIT(wx->num_vfs % 32) - 1;
+ } else {
+ for (i = 0; i < wx->num_vfs; i++)
+ eitrsel |= BIT(i);
+ }
wr32(wx, WX_PX_ITRSEL, eitrsel);
/* use EIAM to auto-mask when MSI-X interrupt is asserted
* this saves a register write for every interrupt
@@ -2876,6 +3003,33 @@ netdev_features_t wx_fix_features(struct net_device *netdev,
}
EXPORT_SYMBOL(wx_fix_features);
+#define WX_MAX_TUNNEL_HDR_LEN 80
+netdev_features_t wx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!skb->encapsulation)
+ return features;
+
+ if (wx->mac.type == wx_mac_em)
+ return features & ~NETIF_F_CSUM_MASK;
+
+ if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+ WX_MAX_TUNNEL_HDR_LEN))
+ return features & ~NETIF_F_CSUM_MASK;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER &&
+ skb->inner_protocol != htons(ETH_P_IP) &&
+ skb->inner_protocol != htons(ETH_P_IPV6) &&
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
+EXPORT_SYMBOL(wx_features_check);
+
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{
@@ -2942,5 +3096,35 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count,
}
EXPORT_SYMBOL(wx_set_ring);
+void wx_service_event_schedule(struct wx *wx)
+{
+ if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state))
+ queue_work(system_power_efficient_wq, &wx->service_task);
+}
+EXPORT_SYMBOL(wx_service_event_schedule);
+
+void wx_service_event_complete(struct wx *wx)
+{
+ if (WARN_ON(!test_bit(WX_STATE_SERVICE_SCHED, wx->state)))
+ return;
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_atomic();
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+EXPORT_SYMBOL(wx_service_event_complete);
+
+void wx_service_timer(struct timer_list *t)
+{
+ struct wx *wx = from_timer(wx, t, service_timer);
+ unsigned long next_event_offset = HZ * 2;
+
+ /* Reset the timer */
+ mod_timer(&wx->service_timer, next_event_offset + jiffies);
+
+ wx_service_event_schedule(wx);
+}
+EXPORT_SYMBOL(wx_service_timer);
+
MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers.");
MODULE_LICENSE("GPL");
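The new service helpers implement the usual timer-plus-workqueue watchdog pattern: wx_service_timer() re-arms itself every two seconds and schedules the work item, the WX_STATE_SERVICE_SCHED bit prevents double-queuing, and the task must end with wx_service_event_complete() to release that bit. A sketch of the wiring a driver is expected to do (kernel context; the foo_service_task body is hypothetical):

#include <linux/timer.h>
#include <linux/workqueue.h>

static void foo_service_task(struct work_struct *work)
{
	struct wx *wx = container_of(work, struct wx, service_task);

	/* ... periodic link check / watchdog work ... */

	wx_service_event_complete(wx);	/* clears WX_STATE_SERVICE_SCHED */
}

/* at probe time: */
timer_setup(&wx->service_timer, wx_service_timer, 0);
INIT_WORK(&wx->service_task, foo_service_task);

/* at open time, start the 2 s cadence: */
mod_timer(&wx->service_timer, jiffies + HZ * 2);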
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index fdeb0c315b75..aed6ea8cf0d6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -33,7 +33,13 @@ void wx_get_stats64(struct net_device *netdev,
int wx_set_features(struct net_device *netdev, netdev_features_t features);
netdev_features_t wx_fix_features(struct net_device *netdev,
netdev_features_t features);
+netdev_features_t wx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
+void wx_service_event_schedule(struct wx *wx);
+void wx_service_event_complete(struct wx *wx);
+void wx_service_timer(struct timer_list *t);
-#endif /* _NGBE_LIB_H_ */
+#endif /* _WX_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
new file mode 100644
index 000000000000..73af5f11c3bd
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include "wx_type.h"
+#include "wx_mbx.h"
+
+/**
+ * wx_obtain_mbx_lock_pf - obtain mailbox lock
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: 0 on success, -EBUSY on failure
+ **/
+static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf)
+{
+ int count = 5;
+ u32 mailbox;
+
+ while (count--) {
+ /* Take ownership of the buffer */
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ mailbox = rd32(wx, WX_PXMAILBOX(vf));
+ if (mailbox & WX_PXMAILBOX_PFU)
+ return 0;
+ else if (count)
+ udelay(10);
+ }
+ wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf);
+
+ return -EBUSY;
+}
+
+static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index)
+{
+ u32 mbvficr = rd32(wx, WX_MBVFICR(index));
+
+ if (!(mbvficr & mask))
+ return -EBUSY;
+ wr32(wx, WX_MBVFICR(index), mask);
+
+ return 0;
+}
+
+/**
+ * wx_check_for_ack_pf - checks to see if the VF has acked
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: 0 if the VF has set the ACK bit, otherwise -EBUSY
+ **/
+int wx_check_for_ack_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFACK_MASK,
+ BIT(vf_bit)),
+ index);
+}
+
+/**
+ * wx_check_for_msg_pf - checks to see if the VF has sent mail
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: 0 if the VF has set the request bit, otherwise -EBUSY
+ **/
+int wx_check_for_msg_pf(struct wx *wx, u16 vf)
+{
+ u32 index = vf / 16, vf_bit = vf % 16;
+
+ return wx_check_for_bit_pf(wx,
+ FIELD_PREP(WX_MBVFICR_VFREQ_MASK,
+ BIT(vf_bit)),
+ index);
+}
+
+/**
+ * wx_write_mbx_pf - Places a message in the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * Return: 0 on success, -EINVAL or -EBUSY on failure
+ **/
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret, i;
+
+ /* mbx->size is up to 15 */
+ if (size > mbx->size) {
+ wx_err(wx, "Invalid mailbox message size %d", size);
+ return -EINVAL;
+ }
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ return ret;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ wx_check_for_msg_pf(wx, vf);
+ wx_check_for_ack_pf(wx, vf);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ wr32a(wx, WX_PXMBMEM(vf), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS);
+
+ return 0;
+}
+
+/**
+ * wx_read_mbx_pf - Read a message from the mailbox
+ * @wx: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf: the VF index
+ *
+ * Return: 0 on success, -EBUSY on failure
+ **/
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf)
+{
+ struct wx_mbx_info *mbx = &wx->mbx;
+ int ret;
+ u16 i;
+
+ /* limit read to the mailbox size; mbx->size is at most 15 */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret = wx_obtain_mbx_lock_pf(wx, vf);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < size; i++)
+ msg[i] = rd32a(wx, WX_PXMBMEM(vf), i);
+
+ /* Acknowledge the message and release buffer */
+ /* set mirrored mailbox flags */
+ wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK);
+ wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK);
+
+ return 0;
+}
+
+/**
+ * wx_check_for_rst_pf - checks to see if the VF has reset
+ * @wx: pointer to the HW structure
+ * @vf: the VF index
+ *
+ * Return: 0 on success, -EBUSY on failure
+ **/
+int wx_check_for_rst_pf(struct wx *wx, u16 vf)
+{
+ u32 reg_offset = WX_VF_REG_OFFSET(vf);
+ u32 vf_shift = WX_VF_IND_SHIFT(vf);
+ u32 vflre = 0;
+
+ vflre = rd32(wx, WX_VFLRE(reg_offset));
+ if (!(vflre & BIT(vf_shift)))
+ return -EBUSY;
+ wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift));
+
+ return 0;
+}
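The PF side of the mailbox is a lock-then-copy protocol: take the PFU bit, flush any stale REQ/ACK state, copy at most WX_VXMAILBOX_SIZE dwords into the shared PXMBMEM window, then raise STS to interrupt the VF and release the buffer. A usage sketch sending a bare control message to one VF (mirroring how ixgbe pings its VFs; the surrounding wx/vf context is assumed):

u32 ping = WX_PF_CONTROL_MSG;
int err;

/* wx_write_mbx_pf() serializes on the PFU lock internally */
err = wx_write_mbx_pf(wx, &ping, 1, vf);
if (err)	/* -EBUSY: lock contention, -EINVAL: oversized message */
	wx_err(wx, "failed to ping VF %u\n", vf);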
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
new file mode 100644
index 000000000000..05aae138dbc3
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+#ifndef _WX_MBX_H_
+#define _WX_MBX_H_
+
+#define WX_VXMAILBOX_SIZE 15
+
+/* PF Registers */
+#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */
+#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */
+#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */
+#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */
+
+#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */
+
+#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */
+#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */
+
+/* SR-IOV specific macros */
+#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */
+#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0)
+#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16)
+
+#define WX_VT_MSGTYPE_ACK BIT(31)
+#define WX_VT_MSGTYPE_NACK BIT(30)
+#define WX_VT_MSGTYPE_CTS BIT(29)
+#define WX_VT_MSGINFO_SHIFT 16
+#define WX_VT_MSGINFO_MASK GENMASK(23, 16)
+
+enum wx_pfvf_api_rev {
+ wx_mbox_api_null,
+ wx_mbox_api_13 = 4, /* API version 1.3 */
+ wx_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API */
+#define WX_VF_RESET 0x01 /* VF requests reset */
+#define WX_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define WX_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define WX_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define WX_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define WX_VF_SET_MACVLAN 0x06 /* VF requests PF unicast filter */
+#define WX_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+#define WX_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define WX_VF_GET_RETA 0x0a /* VF request for RETA */
+#define WX_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define WX_VF_UPDATE_XCAST_MODE 0x0c
+#define WX_VF_GET_LINK_STATE 0x10 /* get vf link state */
+#define WX_VF_GET_FW_VERSION 0x11 /* get fw version */
+
+#define WX_VF_BACKUP 0x8001 /* VF requests backup */
+
+#define WX_PF_CONTROL_MSG BIT(8) /* PF control message */
+#define WX_PF_NOFITY_VF_LINK_STATUS 0x1
+#define WX_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31)
+
+#define WX_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define WX_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define WX_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define WX_VF_DEF_QUEUE 4 /* Default queue offset */
+
+#define WX_VF_PERMADDR_MSG_LEN 4
+
+enum wxvf_xcast_modes {
+ WXVF_XCAST_MODE_NONE = 0,
+ WXVF_XCAST_MODE_MULTI,
+ WXVF_XCAST_MODE_ALLMULTI,
+ WXVF_XCAST_MODE_PROMISC,
+};
+
+int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf);
+int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id);
+int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id);
+
+#endif /* _WX_MBX_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
index 07c015ba338f..2c39b879f977 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -15,12 +15,14 @@
#define WX_INCVAL_100 0xA00000
#define WX_INCVAL_10 0xC7F380
#define WX_INCVAL_EM 0x2000000
+#define WX_INCVAL_AML 0xA00000
#define WX_INCVAL_SHIFT_10GB 20
#define WX_INCVAL_SHIFT_1GB 18
#define WX_INCVAL_SHIFT_100 15
#define WX_INCVAL_SHIFT_10 12
#define WX_INCVAL_SHIFT_EM 22
+#define WX_INCVAL_SHIFT_AML 21
#define WX_OVERFLOW_PERIOD (HZ * 30)
#define WX_PTP_TX_TIMEOUT (HZ)
@@ -504,15 +506,27 @@ static long wx_ptp_create_clock(struct wx *wx)
wx->ptp_caps.gettimex64 = wx_ptp_gettimex64;
wx->ptp_caps.settime64 = wx_ptp_settime64;
wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work;
- if (wx->mac.type == wx_mac_em) {
- wx->ptp_caps.max_adj = 500000000;
+ switch (wx->mac.type) {
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ wx->ptp_caps.max_adj = 250000000;
wx->ptp_caps.n_per_out = 1;
wx->ptp_setup_sdp = wx_ptp_setup_sdp;
wx->ptp_caps.enable = wx_ptp_feature_enable;
- } else {
+ break;
+ case wx_mac_sp:
wx->ptp_caps.max_adj = 250000000;
wx->ptp_caps.n_per_out = 0;
wx->ptp_setup_sdp = NULL;
+ break;
+ case wx_mac_em:
+ wx->ptp_caps.max_adj = 500000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev);
@@ -647,10 +661,18 @@ static u64 wx_ptp_read(const struct cyclecounter *hw_cc)
static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval)
{
- if (wx->mac.type == wx_mac_em) {
+ switch (wx->mac.type) {
+ case wx_mac_aml:
+ case wx_mac_aml40:
+ *shift = WX_INCVAL_SHIFT_AML;
+ *incval = WX_INCVAL_AML;
+ return;
+ case wx_mac_em:
*shift = WX_INCVAL_SHIFT_EM;
*incval = WX_INCVAL_EM;
return;
+ default:
+ break;
}
switch (wx->speed) {
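
Assuming the usual cyclecounter convention ns = (cycles * incval) >> shift, the new AML constants make each raw SYSTIME tick worth incval / 2^shift nanoseconds. A standalone arithmetic check (illustration only):

#include <stdio.h>

int main(void)
{
        struct { const char *name; double incval; int shift; } tbl[] = {
                { "EM",  0x2000000, 22 }, /* -> 8.0 ns/tick (125 MHz) */
                { "AML", 0xA00000,  21 }, /* -> 5.0 ns/tick (200 MHz) */
        };

        for (int i = 0; i < 2; i++)
                printf("%-3s %.1f ns per tick\n", tbl[i].name,
                       tbl[i].incval / (1 << tbl[i].shift));
        return 0;
}
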
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
new file mode 100644
index 000000000000..e8656d9d733b
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_mbx.h"
+#include "wx_sriov.h"
+
+static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
+{
+ bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
+ struct wx *wx = pci_get_drvdata(pdev);
+ u32 vfn = WX_VF_NUM_GET(event_mask);
+
+ if (enable)
+ eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
+}
+
+static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
+{
+ struct vf_macvlans *mv_list;
+ int num_vf_macvlans, i;
+
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&wx->vf_mvs.mvlist);
+
+ num_vf_macvlans = wx->mac.num_rar_entries -
+ (WX_MAX_PF_MACVLANS + 1 + num_vfs);
+ if (!num_vf_macvlans)
+ return -EINVAL;
+
+ mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (!mv_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list[i].vf = -1;
+ mv_list[i].free = true;
+ list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
+ }
+ wx->mv_list = mv_list;
+
+ return 0;
+}
+
+static void wx_sriov_clear_data(struct wx *wx)
+{
+ /* set num VFs to 0 to prevent access to vfinfo */
+ wx->num_vfs = 0;
+
+ /* free VF control structures */
+ kfree(wx->vfinfo);
+ wx->vfinfo = NULL;
+
+ /* free macvlan list */
+ kfree(wx->mv_list);
+ wx->mv_list = NULL;
+
+ /* set default pool back to 0 */
+ wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
+ wx->ring_feature[RING_F_VMDQ].offset = 0;
+
+ clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ /* Disable VMDq flag so device will be set in NM mode */
+ if (wx->ring_feature[RING_F_VMDQ].limit == 1)
+ clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+}
+
+static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
+{
+ int i, ret = 0;
+ u32 value = 0;
+
+ set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+ dev_info(&wx->pdev->dev, "SR-IOV enabled with %d VFs\n", num_vfs);
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+ if (!wx->ring_feature[RING_F_VMDQ].limit)
+ wx->ring_feature[RING_F_VMDQ].limit = 1;
+ wx->ring_feature[RING_F_VMDQ].offset = num_vfs;
+
+ wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!wx->vfinfo)
+ return -ENOMEM;
+
+ ret = wx_alloc_vf_macvlans(wx, num_vfs);
+ if (ret)
+ return ret;
+
+ /* Initialize default switching mode VEB */
+ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);
+
+ for (i = 0; i < num_vfs; i++) {
+ /* enable spoof checking for all VFs */
+ wx->vfinfo[i].spoofchk_enabled = true;
+ wx->vfinfo[i].link_enable = true;
+ /* untrust all VFs */
+ wx->vfinfo[i].trusted = false;
+ /* set the default xcast mode */
+ wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
+ }
+
+ if (!test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ value = WX_CFG_PORT_CTL_NUM_VT_8;
+ } else {
+ if (num_vfs < 32)
+ value = WX_CFG_PORT_CTL_NUM_VT_32;
+ else
+ value = WX_CFG_PORT_CTL_NUM_VT_64;
+ }
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_NUM_VT_MASK,
+ value);
+
+ return ret;
+}
+
+static void wx_sriov_reinit(struct wx *wx)
+{
+ rtnl_lock();
+ wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
+ rtnl_unlock();
+}
+
+void wx_disable_sriov(struct wx *wx)
+{
+ if (!pci_vfs_assigned(wx->pdev))
+ pci_disable_sriov(wx->pdev);
+ else
+ wx_err(wx, "Unloading driver while VFs are assigned.\n");
+
+ /* clear flags and free allocated data */
+ wx_sriov_clear_data(wx);
+}
+EXPORT_SYMBOL(wx_disable_sriov);
+
+static int wx_pci_sriov_enable(struct pci_dev *dev,
+ int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+ int err = 0, i;
+
+ err = __wx_enable_sriov(wx, num_vfs);
+ if (err)
+ return err;
+
+ wx->num_vfs = num_vfs;
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_vf_configuration(dev, (i | WX_VF_ENABLE));
+
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ wx_sriov_reinit(wx);
+
+ err = pci_enable_sriov(dev, num_vfs);
+ if (err) {
+ wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
+ goto err_out;
+ }
+
+ return num_vfs;
+err_out:
+ wx_sriov_clear_data(wx);
+ return err;
+}
+
+static void wx_pci_sriov_disable(struct pci_dev *dev)
+{
+ struct wx *wx = pci_get_drvdata(dev);
+
+ wx_disable_sriov(wx);
+ wx_sriov_reinit(wx);
+}
+
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct wx *wx = pci_get_drvdata(pdev);
+ int err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ wx_pci_sriov_disable(pdev);
+ return 0;
+ }
+
+ wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = wx_pci_sriov_enable(pdev, num_vfs);
+ if (err)
+ return err;
+
+ return num_vfs;
+}
+EXPORT_SYMBOL(wx_pci_sriov_configure);
+
+static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
+{
+ u8 hw_addr[ETH_ALEN];
+ int ret = 0;
+
+ ether_addr_copy(hw_addr, mac_addr);
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ ret = wx_add_mac_filter(wx, hw_addr, vf);
+ if (ret >= 0)
+ ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
+ else
+ eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);
+
+ return ret;
+}
+
+static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
+{
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+
+ vmolr |= WX_PSR_VM_L2CTL_BAM;
+ if (aupe)
+ vmolr |= WX_PSR_VM_L2CTL_AUPE;
+ else
+ vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
+{
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
+ WX_TDM_VLAN_INS_VLANA_DEFAULT;
+
+ wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
+}
+
+static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
+{
+ if (!vid && !add)
+ return 0;
+
+ return wx_set_vfta(wx, vid, vf, (bool)add);
+}
+
+static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ u32 pfvfspoof;
+
+ pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
+ if (enable)
+ pfvfspoof |= BIT(vf_bit);
+ else
+ pfvfspoof &= ~BIT(vf_bit);
+ wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
+}
+
+static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ u32 reg = 0, n = vf * q_per_pool / 32;
+ u32 i = vf * q_per_pool;
+
+ reg = rd32(wx, WX_RDM_PF_QDE(n));
+ for (i = (vf * q_per_pool - n * 32);
+ i < ((vf + 1) * q_per_pool - n * 32);
+ i++) {
+ if (qde == 1)
+ reg |= qde << i;
+ else
+ reg &= qde << i;
+ }
+
+ wr32(wx, WX_RDM_PF_QDE(n), reg);
+}
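
The indexing in wx_write_qde() follows from each WX_RDM_PF_QDE register holding 32 queue-drop-enable bits, with a VF owning q_per_pool consecutive queues starting at vf * q_per_pool. A standalone sketch of the mapping (illustration only):

#include <stdio.h>

int main(void)
{
        unsigned int q_per_pool = 4; /* e.g. the WX_VMDQ_4Q_MASK layout */
        unsigned int vf = 10;
        unsigned int n = vf * q_per_pool / 32;      /* register index  */
        unsigned int first = vf * q_per_pool % 32;  /* first bit in it */

        printf("vf %u -> WX_RDM_PF_QDE(%u), bits %u..%u\n",
               vf, n, first, first + q_per_pool - 1);
        return 0;
}
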
+
+static void wx_clear_vmvir(struct wx *wx, u32 vf)
+{
+ wr32(wx, WX_TDM_VLAN_INS(vf), 0);
+}
+
+static void wx_ping_vf(struct wx *wx, int vf)
+{
+ u32 ping = WX_PF_CONTROL_MSG;
+
+ if (wx->vfinfo[vf].clear_to_send)
+ ping |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, &ping, 1, vf);
+}
+
+static void wx_set_vf_rx_tx(struct wx *wx, int vf)
+{
+ u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+
+ reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index));
+ reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index));
+
+ if (wx->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | BIT(vf_bit);
+ reg_req_rx = reg_cur_rx | BIT(vf_bit);
+ /* Enable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ wr32(wx, WX_TDM_VF_TE(index), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ wr32(wx, WX_RDM_VF_RE(index), reg_req_rx);
+ } else {
+ reg_req_tx = BIT(vf_bit);
+ reg_req_rx = BIT(vf_bit);
+ /* Disable particular VF */
+ if (reg_cur_tx & reg_req_tx)
+ wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx);
+ if (reg_cur_rx & reg_req_rx)
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx);
+ }
+}
+
+static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+ unsigned int default_tc = 0;
+
+ msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+ msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+ if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos)
+ msgbuf[WX_VF_TRANS_VLAN] = 1;
+ else
+ msgbuf[WX_VF_TRANS_VLAN] = 0;
+
+ /* notify VF of default queue */
+ msgbuf[WX_VF_DEF_QUEUE] = default_tc;
+
+ return 0;
+}
+
+static void wx_vf_reset_event(struct wx *wx, u16 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u8 num_tcs = netdev_get_num_tc(wx->netdev);
+
+ /* add PF assigned VLAN */
+ wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf);
+
+ /* reset offloads to defaults */
+ wx_set_vmolr(wx, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ wx_clear_vmvir(wx, vf);
+ } else {
+ if (vfinfo->pf_qos || !num_tcs)
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ wx_set_vmvir(wx, vfinfo->pf_vlan,
+ wx->default_up, vf);
+ }
+
+ /* reset multicast table array for vf */
+ wx->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ wx_set_rx_mode(wx->netdev);
+
+ wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+ /* reset VF api back to unknown */
+ wx->vfinfo[vf].vf_api = wx_mbox_api_null;
+}
+
+static void wx_vf_reset_msg(struct wx *wx, u16 vf)
+{
+ const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr;
+ struct net_device *dev = wx->netdev;
+ u32 msgbuf[5] = {0, 0, 0, 0, 0};
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 reg = 0, index, vf_bit;
+ int pf_max_frame;
+
+ /* reset the filters for the device */
+ wx_vf_reset_event(wx, vf);
+
+ /* set vf mac address */
+ if (!is_zero_ether_addr(vf_mac))
+ wx_set_vf_mac(wx, vf, vf_mac);
+
+ index = WX_VF_REG_OFFSET(vf);
+ vf_bit = WX_VF_IND_SHIFT(vf);
+
+ /* force drop enable for all VF Rx queues */
+ wx_write_qde(wx, vf, 1);
+
+ /* set transmit and receive for vf */
+ wx_set_vf_rx_tx(wx, vf);
+
+ pf_max_frame = dev->mtu + ETH_HLEN;
+
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg = BIT(vf_bit);
+ wr32(wx, WX_RDM_VFRE_CLR(index), reg);
+
+ /* enable VF mailbox for further messages */
+ wx->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = WX_VF_RESET;
+ if (!is_zero_ether_addr(vf_mac)) {
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+ } else {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_err(wx, "VF %d has no MAC address assigned", vf);
+ }
+
+ msgbuf[3] = wx->mac.mc_filter_type;
+ wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf);
+}
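
On the wire, the reset reply is four words (WX_VF_PERMADDR_MSG_LEN): word 0 is WX_VF_RESET with ACK or NACK OR'd in, words 1-2 carry the 6-byte MAC, and word 3 the multicast filter type. A hypothetical VF-side parse (standalone C, illustration only; the VF driver is not part of this patch):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        uint32_t reply[4] = { 0x01 | (1u << 31), 0, 0, 0 }; /* ack'd reset */
        uint8_t mac[6];

        memcpy(mac, &reply[1], sizeof(mac));
        printf("perm MAC %02x:%02x:%02x:%02x:%02x:%02x, mc filter %u\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], reply[3]);
        return 0;
}
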
+
+static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ const u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int ret;
+
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+
+ if (wx->vfinfo[vf].pf_set_mac &&
+ memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) {
+ wx_err(wx,
+ "VF %d attempted to set a MAC but it already has one.\n",
+ vf);
+ return -EBUSY;
+ }
+
+ ret = wx_set_vf_mac(wx, vf, new_mac);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &wx->vfinfo[vf];
+ u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK)
+ >> WX_VT_MSGINFO_SHIFT;
+ u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ u32 vector_bit, vector_reg, mta_reg, i;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+
+ /* only so many hash values supported */
+ entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES);
+ vfinfo->num_vf_mc_hashes = entries;
+
+ for (i = 0; i < entries; i++)
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+
+ for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
+ vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]);
+ vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]);
+ mta_reg = wx->mac.mta_shadow[vector_reg];
+ mta_reg |= BIT(vector_bit);
+ wx->mac.mta_shadow[vector_reg] = mta_reg;
+ wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg);
+ }
+ vmolr |= WX_PSR_VM_L2CTL_ROMPE;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
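
The table addressing above treats the 12-bit multicast hash as a register index (bits 11:5) plus a bit position (bits 4:0), i.e. 128 32-bit WX_PSR_MC_TBL registers. A standalone sketch (illustration only):

#include <stdio.h>

int main(void)
{
        unsigned int hash = 0x9A5;             /* example 12-bit hash */
        unsigned int reg = (hash >> 5) & 0x7f; /* WX_PSR_MC_TBL_REG() */
        unsigned int bit = hash & 0x1f;        /* WX_PSR_MC_TBL_BIT() */

        printf("hash 0x%03x -> WX_PSR_MC_TBL(%u), bit %u\n", hash, reg, bit);
        return 0;
}
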
+
+static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf)
+{
+ u32 index, vf_bit, vfre;
+ u32 max_frs, reg_val;
+
+ /* determine VF receive enable location */
+ index = WX_VF_REG_OFFSET(vf);
+ vf_bit = WX_VF_IND_SHIFT(vf);
+
+ vfre = rd32(wx, WX_RDM_VF_RE(index));
+ vfre |= BIT(vf_bit);
+ wr32(wx, WX_RDM_VF_RE(index), vfre);
+
+ /* pull current max frame size from hardware */
+ max_frs = DIV_ROUND_UP(max_frame, 1024);
+ reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK;
+ if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA))
+ wr32(wx, WX_MAC_WDG_TIMEOUT,
+ max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA);
+}
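
The watchdog-timeout update above works in 1 KB units and keeps the programmed field WX_MAC_WDG_TIMEOUT_WTO_DELTA (2) below the rounded-up frame size. A standalone worked example (illustration only):

#include <stdio.h>

int main(void)
{
        unsigned int max_frame = 9216;                    /* requested LPE */
        unsigned int max_frs = (max_frame + 1023) / 1024; /* DIV_ROUND_UP  */
        unsigned int cur_wto = 4;                         /* example field */

        if (max_frs > cur_wto + 2)
                printf("program WTO = %u\n", max_frs - 2); /* prints 7 */
        return 0;
}
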
+
+static int wx_find_vlvf_entry(struct wx *wx, u32 vlan)
+{
+ int regindex;
+ u32 vlvf;
+
+ /* shortcut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ /* Return a negative value if not found */
+ if (regindex >= WX_PSR_VLAN_SWC_ENTRIES)
+ regindex = -EINVAL;
+
+ return regindex;
+}
+
+static int wx_set_vf_macvlan(struct wx *wx,
+ u16 vf, int index, unsigned char *mac_addr)
+{
+ struct vf_macvlans *entry;
+ struct list_head *pos;
+ int retval = 0;
+
+ if (index <= 1) {
+ list_for_each(pos, &wx->vf_mvs.mvlist) {
+ entry = list_entry(pos, struct vf_macvlans, mvlist);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ entry->is_macvlan = false;
+ wx_del_mac_filter(wx, entry->vf_macvlan, vf);
+ }
+ }
+ }
+
+ if (!index)
+ return 0;
+
+ entry = NULL;
+ list_for_each(pos, &wx->vf_mvs.mvlist) {
+ entry = list_entry(pos, struct vf_macvlans, mvlist);
+ if (entry->free)
+ break;
+ }
+
+ if (!entry || !entry->free)
+ return -ENOSPC;
+
+ retval = wx_add_mac_filter(wx, mac_addr, vf);
+ if (retval >= 0) {
+ entry->free = false;
+ entry->is_macvlan = true;
+ entry->vf = vf;
+ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+ }
+
+ return retval;
+}
+
+static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
+ int ret;
+
+ if (add)
+ wx->vfinfo[vf].vlan_count++;
+ else if (wx->vfinfo[vf].vlan_count)
+ wx->vfinfo[vf].vlan_count--;
+
+ /* In promiscuous mode, any VLAN filter set for a VF must
+ * also have the PF pool added to it.
+ */
+ if (add && wx->netdev->flags & IFF_PROMISC)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+
+ ret = wx_set_vf_vlan(wx, add, vid, vf);
+ if (!ret && wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vlan_anti_spoofing(wx, true, vf);
+
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && wx->netdev->flags & IFF_PROMISC) {
+ u32 bits = 0, vlvf;
+ int reg_ndx;
+
+ reg_ndx = wx_find_vlvf_entry(wx, vid);
+ if (reg_ndx < 0)
+ return -ENOSPC;
+ wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
+ vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+ /* See if any pools other than the PF are set for this
+ * VLAN filter entry.
+ */
+ if (VMDQ_P(0) < 32) {
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ bits &= ~BIT(VMDQ_P(0));
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ } else {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+ bits &= ~BIT(VMDQ_P(0) % 32);
+ bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+ }
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid && !bits)
+ wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+ }
+
+ return 0;
+}
+
+static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+ int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
+ WX_VT_MSGINFO_SHIFT;
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int err;
+
+ if (wx->vfinfo[vf].pf_set_mac && index > 0) {
+ wx_err(wx, "VF %d request MACVLAN filter but is denied\n", vf);
+ return -EINVAL;
+ }
+
+ /* A non-zero index indicates the VF is setting a filter */
+ if (index) {
+ if (!is_valid_ether_addr(new_mac)) {
+ wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+ return -EINVAL;
+ }
+ /* If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives.
+ */
+ if (wx->vfinfo[vf].spoofchk_enabled)
+ wx_set_vf_spoofchk(wx->netdev, vf, false);
+ }
+
+ err = wx_set_vf_macvlan(wx, vf, index, new_mac);
+ if (err == -ENOSPC)
+ wx_err(wx,
+ "VF %d request MACVLAN filter but there is no space\n",
+ vf);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int api = msgbuf[1];
+
+ switch (api) {
+ case wx_mbox_api_13:
+ wx->vfinfo[vf].vf_api = api;
+ return 0;
+ default:
+ wx_err(wx, "VF %d requested invalid api version %u\n", vf, api);
+ return -EINVAL;
+ }
+}
+
+static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ msgbuf[1] = wx->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
+static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ unsigned long fw_version = 0;
+ int ret = 0;
+
+ ret = kstrtoul(wx->eeprom_id, 16, &fw_version);
+ if (ret)
+ return -EOPNOTSUPP;
+ msgbuf[1] = fw_version;
+
+ return 0;
+}
+
+static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf)
+{
+ int xcast_mode = msgbuf[1];
+ u32 vmolr, disable, enable;
+
+ if (wx->vfinfo[vf].xcast_mode == xcast_mode)
+ return 0;
+
+ switch (xcast_mode) {
+ case WXVF_XCAST_MODE_NONE:
+ disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ enable = 0;
+ break;
+ case WXVF_XCAST_MODE_MULTI:
+ disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE;
+ break;
+ case WXVF_XCAST_MODE_ALLMULTI:
+ disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE;
+ break;
+ case WXVF_XCAST_MODE_PROMISC:
+ disable = 0;
+ enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE |
+ WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_VPE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+ vmolr &= ~disable;
+ vmolr |= enable;
+ wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+
+ wx->vfinfo[vf].xcast_mode = xcast_mode;
+ msgbuf[1] = xcast_mode;
+
+ return 0;
+}
+
+static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf)
+{
+ u16 mbx_size = WX_VXMAILBOX_SIZE;
+ u32 msgbuf[WX_VXMAILBOX_SIZE];
+ int retval;
+
+ retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf);
+ if (retval) {
+ wx_err(wx, "Error receiving message from VF\n");
+ return;
+ }
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK))
+ return;
+
+ if (msgbuf[0] == WX_VF_RESET) {
+ wx_vf_reset_msg(wx, vf);
+ return;
+ }
+
+ /* until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+ if (!wx->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ wx_write_mbx_pf(wx, msgbuf, 1, vf);
+ return;
+ }
+
+ switch ((msgbuf[0] & U16_MAX)) {
+ case WX_VF_SET_MAC_ADDR:
+ retval = wx_set_vf_mac_addr(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_MULTICAST:
+ wx_set_vf_multicasts(wx, msgbuf, vf);
+ retval = 0;
+ break;
+ case WX_VF_SET_VLAN:
+ retval = wx_set_vf_vlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_SET_LPE:
+ wx_set_vf_lpe(wx, msgbuf[1], vf);
+ retval = 0;
+ break;
+ case WX_VF_SET_MACVLAN:
+ retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf);
+ break;
+ case WX_VF_API_NEGOTIATE:
+ retval = wx_negotiate_vf_api(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_QUEUES:
+ retval = wx_get_vf_queues(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_LINK_STATE:
+ retval = wx_get_vf_link_state(wx, msgbuf, vf);
+ break;
+ case WX_VF_GET_FW_VERSION:
+ retval = wx_get_fw_version(wx, msgbuf, vf);
+ break;
+ case WX_VF_UPDATE_XCAST_MODE:
+ retval = wx_update_vf_xcast_mode(wx, msgbuf, vf);
+ break;
+ case WX_VF_BACKUP:
+ break;
+ default:
+ wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= WX_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= WX_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+
+ wx_write_mbx_pf(wx, msgbuf, mbx_size, vf);
+}
+
+static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf)
+{
+ u32 msg = WX_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!wx->vfinfo[vf].clear_to_send)
+ wx_write_mbx_pf(wx, &msg, 1, vf);
+}
+
+void wx_msg_task(struct wx *wx)
+{
+ u16 vf;
+
+ for (vf = 0; vf < wx->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!wx_check_for_rst_pf(wx, vf))
+ wx_vf_reset_event(wx, vf);
+
+ /* process any messages pending */
+ if (!wx_check_for_msg_pf(wx, vf))
+ wx_rcv_msg_from_vf(wx, vf);
+
+ /* process any acks */
+ if (!wx_check_for_ack_pf(wx, vf))
+ wx_rcv_ack_from_vf(wx, vf);
+ }
+}
+EXPORT_SYMBOL(wx_msg_task);
+
+void wx_disable_vf_rx_tx(struct wx *wx)
+{
+ wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX);
+ wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX);
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX);
+ wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX);
+ }
+}
+EXPORT_SYMBOL(wx_disable_vf_rx_tx);
+
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up)
+{
+ u32 msgbuf[2] = {0, 0};
+ u16 i;
+
+ if (!wx->num_vfs)
+ return;
+ msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG;
+ if (link_up)
+ msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up;
+ if (wx->notify_down)
+ msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING;
+ for (i = 0; i < wx->num_vfs; i++) {
+ if (wx->vfinfo[i].clear_to_send)
+ msgbuf[0] |= WX_VT_MSGTYPE_CTS;
+ wx_write_mbx_pf(wx, msgbuf, 2, i);
+ }
+}
+EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status);
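
Word 1 of this notification packs the link speed into bits 31:1 (FIELD_PREP(GENMASK(31, 1), speed)) and the link-up flag into bit 0. A standalone sketch of the encoding (illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t speed = 10000;            /* SPEED_10000, in Mb/s */
        uint32_t word1 = (speed << 1) | 1; /* link up              */

        printf("msgbuf[1] = 0x%08x (speed %u, up %u)\n",
               word1, word1 >> 1, word1 & 1);
        return 0;
}
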
+
+static void wx_set_vf_link_state(struct wx *wx, int vf, int state)
+{
+ wx->vfinfo[vf].link_state = state;
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (netif_running(wx->netdev))
+ wx->vfinfo[vf].link_enable = true;
+ else
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ wx->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ wx->vfinfo[vf].link_enable = false;
+ break;
+ }
+ /* restart the VF */
+ wx->vfinfo[vf].clear_to_send = false;
+ wx_ping_vf(wx, vf);
+
+ wx_set_vf_rx_tx(wx, vf);
+}
+
+void wx_set_all_vfs(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_vfs; i++)
+ wx_set_vf_link_state(wx, i, wx->vfinfo[i].link_state);
+}
+EXPORT_SYMBOL(wx_set_all_vfs);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
new file mode 100644
index 000000000000..8a3a47bb5815
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_SRIOV_H_
+#define _WX_SRIOV_H_
+
+#define WX_VF_ENABLE_CHECK(_m) FIELD_GET(BIT(31), (_m))
+#define WX_VF_NUM_GET(_m) FIELD_GET(GENMASK(5, 0), (_m))
+#define WX_VF_ENABLE BIT(31)
+
+void wx_disable_sriov(struct wx *wx);
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void wx_msg_task(struct wx *wx);
+void wx_disable_vf_rx_tx(struct wx *wx);
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
+void wx_set_all_vfs(struct wx *wx);
+
+#endif /* _WX_SRIOV_H_ */
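
The event_mask convention used by wx_vf_configuration() packs the VF number into bits 5:0 and an enable flag into bit 31, which these macros decode. A standalone sketch (illustration only):

#include <stdio.h>

int main(void)
{
        unsigned int event_mask = (1u << 31) | 6;     /* WX_VF_ENABLE | vf 6  */
        unsigned int enable = (event_mask >> 31) & 1; /* WX_VF_ENABLE_CHECK() */
        unsigned int vfn = event_mask & 0x3f;         /* WX_VF_NUM_GET()      */

        printf("vf %u, enable %u\n", vfn, enable);
        return 0;
}
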
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 4c545b2aa997..7730c9fc3e02 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -20,8 +20,13 @@
/* MSI-X capability fields masks */
#define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
#define WX_PCI_LINK_STATUS 0xB2
+#define WX_MAX_PF_MACVLANS 15
+#define WX_MAX_VF_MC_ENTRIES 30
/**************** Global Registers ****************************/
+#define WX_VF_REG_OFFSET(_v) FIELD_GET(GENMASK(15, 5), (_v))
+#define WX_VF_IND_SHIFT(_v) FIELD_GET(GENMASK(4, 0), (_v))
+
/* chip control Registers */
#define WX_MIS_PWR 0x10000
#define WX_MIS_RST 0x1000C
@@ -76,6 +81,9 @@
#define WX_MAC_LXONOFFRXC 0x11E0C
/*********************** Receive DMA registers **************************/
+#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4))
+#define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4))
+#define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4))
#define WX_RDM_DRP_PKT 0x12500
#define WX_RDM_PKT_CNT 0x12504
#define WX_RDM_BYTE_CNT_LSB 0x12508
@@ -84,12 +92,17 @@
/************************* Port Registers ************************************/
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
+#define WX_CFG_PORT_CTL_PFRSTD BIT(14)
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan */
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of VTs */
+#define WX_CFG_PORT_CTL_NUM_VT_NONE 0
+#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
+#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
+#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
/* GPIO Registers */
#define WX_GPIO_DR 0x14800
@@ -112,6 +125,11 @@
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL 0x18000
+#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4))
+#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4))
+#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4))
+#define WX_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4))
+
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
@@ -165,6 +183,7 @@
/******************************* PSR Registers *******************************/
/* psr control */
#define WX_PSR_CTL 0x15000
+#define WX_PSR_VM_CTL 0x151B0
/* Header split receive */
#define WX_PSR_CTL_SW_EN BIT(18)
#define WX_PSR_CTL_RSC_ACK BIT(17)
@@ -201,12 +220,17 @@
#define WX_PSR_1588_CTL_VALID BIT(0)
/* mcast/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
+#define WX_PSR_MC_TBL_REG(_i) FIELD_GET(GENMASK(11, 5), (_i))
+#define WX_PSR_MC_TBL_BIT(_i) FIELD_GET(GENMASK(4, 0), (_i))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */
+#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7)
/* VM L2 control */
#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */
#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept unmatched vlan */
+#define WX_PSR_VM_L2CTL_VPE BIT(7) /* vlan promiscuous mode */
#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */
#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */
#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */
@@ -245,10 +269,12 @@
#define WX_PSR_VLAN_SWC 0x16220
#define WX_PSR_VLAN_SWC_VM_L 0x16224
#define WX_PSR_VLAN_SWC_VM_H 0x16228
+#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4))
#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */
/* VLAN pool filtering masks */
#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */
#define WX_PSR_VLAN_SWC_ENTRIES 64
+#define WX_PSR_VLAN_SWC_VLANID_MASK GENMASK(11, 0)
/********************************* RSEC **************************************/
/* general rsec */
@@ -259,6 +285,13 @@
#define WX_RSC_ST 0x17004
#define WX_RSC_ST_RSEC_RDY BIT(0)
+/*********************** Transmit DMA registers **************************/
+/* transmit global control */
+#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4))
+#define WX_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4))
+/* Per VF Port VLAN insertion rules */
+#define WX_TDM_VLAN_INS_VLANA_DEFAULT BIT(30) /* Always use default VLAN */
+
/****************************** TDB ******************************************/
#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
@@ -328,6 +361,9 @@
#define WX_MAC_WDG_TIMEOUT 0x1100C
#define WX_MAC_RX_FLOW_CTRL 0x11090
#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
+
+#define WX_MAC_WDG_TIMEOUT_WTO_MASK GENMASK(3, 0)
+#define WX_MAC_WDG_TIMEOUT_WTO_DELTA 2
/* MDIO Registers */
#define WX_MSCA 0x11200
#define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
@@ -417,6 +453,15 @@ enum WX_MSCA_CMD_value {
/* Number of 80 microseconds we wait for PCI Express master disable */
#define WX_PCI_MASTER_DISABLE_TIMEOUT 80000
+#define WX_RSS_64Q_MASK 0x3F
+#define WX_RSS_8Q_MASK 0x7
+#define WX_RSS_4Q_MASK 0x3
+#define WX_RSS_2Q_MASK 0x1
+#define WX_RSS_DISABLED_MASK 0x0
+
+#define WX_VMDQ_4Q_MASK 0x7C
+#define WX_VMDQ_2Q_MASK 0x7E
+
/****************** Manageablility Host Interface defines ********************/
#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
@@ -484,7 +529,7 @@ enum WX_MSCA_CMD_value {
#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
-#define VMDQ_P(p) p
+#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset)
/* Supported Rx Buffer Sizes */
#define WX_RXBUFFER_256 256 /* Used for skb receive header */
@@ -778,6 +823,10 @@ struct wx_bus_info {
u16 device;
};
+struct wx_mbx_info {
+ u16 size;
+};
+
struct wx_thermal_sensor_data {
s16 temp;
s16 alarm_thresh;
@@ -789,13 +838,14 @@ enum wx_mac_type {
wx_mac_sp,
wx_mac_em,
wx_mac_aml,
+ wx_mac_aml40,
};
-enum sp_media_type {
- sp_media_unknown = 0,
- sp_media_fiber,
- sp_media_copper,
- sp_media_backplane
+enum wx_media_type {
+ wx_media_unknown = 0,
+ wx_media_fiber,
+ wx_media_copper,
+ wx_media_backplane
};
enum em_mac_type {
@@ -1051,6 +1101,7 @@ struct wx_ring_feature {
enum wx_ring_f_enum {
RING_F_NONE = 0,
+ RING_F_VMDQ,
RING_F_RSS,
RING_F_FDIR,
RING_F_ARRAY_SIZE /* must be last in enum set */
@@ -1103,11 +1154,43 @@ enum wx_state {
WX_STATE_SWFW_BUSY,
WX_STATE_PTP_RUNNING,
WX_STATE_PTP_TX_IN_PROGRESS,
+ WX_STATE_SERVICE_SCHED,
WX_STATE_NBITS /* must be last */
};
+struct vf_data_storage {
+ struct pci_dev *vfdev;
+ unsigned char vf_mac_addr[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool link_enable;
+ bool trusted;
+ int xcast_mode;
+ unsigned int vf_api;
+ bool clear_to_send;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ bool pf_set_mac;
+
+ u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 vlan_count;
+ int link_state;
+};
+
+struct vf_macvlans {
+ struct list_head mvlist;
+ int vf;
+ bool free;
+ bool is_macvlan;
+ u8 vf_macvlan[ETH_ALEN];
+};
+
enum wx_pf_flags {
+ WX_FLAG_MULTI_64_FUNC,
WX_FLAG_SWFW_RING,
+ WX_FLAG_VMDQ_ENABLED,
+ WX_FLAG_VLAN_PROMISC,
+ WX_FLAG_SRIOV_ENABLED,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
@@ -1115,6 +1198,8 @@ enum wx_pf_flags {
WX_FLAG_RX_HWTSTAMP_ENABLED,
WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
WX_FLAG_PTP_PPS_ENABLED,
+ WX_FLAG_NEED_LINK_CONFIG,
+ WX_FLAG_NEED_SFP_RESET,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1128,9 +1213,10 @@ struct wx {
struct pci_dev *pdev;
struct net_device *netdev;
struct wx_bus_info bus;
+ struct wx_mbx_info mbx;
struct wx_mac_info mac;
enum em_mac_type mac_type;
- enum sp_media_type media_type;
+ enum wx_media_type media_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
struct wx_fc_info fc;
@@ -1151,6 +1237,9 @@ struct wx {
u8 swfw_index;
/* PHY stuff */
+ bool notify_down;
+ int adv_speed;
+ int adv_duplex;
unsigned int link;
int speed;
int duplex;
@@ -1182,6 +1271,8 @@ struct wx {
struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
struct wx_ring *rx_ring[64];
struct wx_q_vector *q_vector[64];
+ int num_rx_pools;
+ int num_rx_queues_per_pool;
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
@@ -1203,6 +1294,7 @@ struct wx {
u32 wol;
u16 bd_number;
+ bool default_up;
struct wx_hw_stats stats;
u64 tx_busy;
@@ -1211,10 +1303,16 @@ struct wx {
u64 hw_csum_rx_good;
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
+ struct vf_macvlans vf_mvs;
+ struct vf_macvlans *mv_list;
+ unsigned long fwd_bitmask;
u32 atr_sample_rate;
void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
void (*configure_fdir)(struct wx *wx);
+ int (*setup_tc)(struct net_device *netdev, u8 tc);
void (*do_reset)(struct net_device *netdev);
int (*ptp_setup_sdp)(struct wx *wx);
@@ -1239,6 +1337,9 @@ struct wx {
struct ptp_clock_info ptp_caps;
struct kernel_hwtstamp_config tstamp_config;
struct sk_buff *ptp_tx_skb;
+
+ struct timer_list service_timer;
+ struct work_struct service_task;
};
#define WX_INTR_ALL (~0ULL)
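
The new service_timer/service_task members and the WX_STATE_SERVICE_SCHED bit point at the usual deferred-work pattern, where an interrupt handler sets a flag and queues the work at most once. The wx_service_event_schedule() helper called from txgbe_aml.c is not in this hunk; an assumed sketch of its shape (an assumption, not the patch's code):

/* Assumed shape of wx_service_event_schedule() (not in this hunk):
 * queue the service task at most once until it runs.
 */
static void wx_service_event_schedule(struct wx *wx)
{
        if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state))
                schedule_work(&wx->service_task);
}
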
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 91b3055a5a9f..b5022c49dc5e 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -15,6 +15,8 @@
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -129,6 +131,10 @@ static int ngbe_sw_init(struct wx *wx)
wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
+ wx->setup_tc = ngbe_setup_tc;
+ set_bit(0, &wx->fwd_bitmask);
+
return 0;
}
@@ -200,12 +206,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
+static irqreturn_t __ngbe_msix_misc(struct wx *wx, u32 eicr)
{
- struct wx *wx = data;
- u32 eicr;
-
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (eicr & NGBE_PX_MISC_IC_VF_MBOX)
+ wx_msg_task(wx);
if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC))
wx_ptp_check_pps_event(wx);
@@ -217,6 +221,35 @@ static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t ngbe_msix_misc(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
+ return __ngbe_msix_misc(wx, eicr);
+}
+
+static irqreturn_t ngbe_misc_and_queue(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (!eicr) {
+ /* queue */
+ q_vector = wx->q_vector[0];
+ napi_schedule_irqoff(&q_vector->napi);
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, true);
+ return IRQ_HANDLED;
+ }
+
+ return __ngbe_msix_misc(wx, eicr);
+}
+
/**
* ngbe_request_msix_irqs - Initialize MSI-X interrupts
* @wx: board private structure
@@ -249,8 +282,16 @@ static int ngbe_request_msix_irqs(struct wx *wx)
}
}
- err = request_irq(wx->msix_entry->vector,
- ngbe_msix_other, 0, netdev->name, wx);
+ /* Due to hardware design, when num_vfs < 7, the PF can use vector 0
+ * for misc interrupts and vector 1 for the queue. But when
+ * num_vfs == 7, vector[1] is assigned to VF6, so misc and queue
+ * must share vector[0].
+ */
+ if (wx->num_vfs == 7)
+ err = request_irq(wx->msix_entry->vector,
+ ngbe_misc_and_queue, 0, netdev->name, wx);
+ else
+ err = request_irq(wx->msix_entry->vector,
+ ngbe_msix_misc, 0, netdev->name, wx);
if (err) {
wx_err(wx, "request_irq for msix_other failed: %d\n", err);
@@ -302,6 +343,22 @@ static void ngbe_disable_device(struct wx *wx)
struct net_device *netdev = wx->netdev;
u32 i;
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ wx->notify_down = true;
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+ wx->notify_down = false;
+
+ /* Disable all VFTE/VFRE TX/RX */
+ wx_disable_vf_rx_tx(wx);
+ }
+
/* disable all enabled rx queues */
for (i = 0; i < wx->num_rx_queues; i++)
/* this call also flushes the previous write */
@@ -324,12 +381,19 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
+static void ngbe_reset(struct wx *wx)
+{
+ wx_flush_sw_mac_table(wx);
+ wx_mac_set_default_filter(wx, wx->mac.addr);
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
+}
+
void ngbe_down(struct wx *wx)
{
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
- if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
- wx_ptp_reset(wx);
+ ngbe_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
@@ -352,6 +416,11 @@ void ngbe_up(struct wx *wx)
ngbe_sfp_modules_txrx_powerctl(wx, true);
phylink_start(wx->phylink);
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD);
+ if (wx->num_vfs)
+ wx_ping_all_vfs_with_link_status(wx, false);
}
/**
@@ -518,6 +587,7 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_fix_features = wx_fix_features,
+ .ndo_features_check = wx_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
@@ -596,6 +666,10 @@ static int ngbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ /* The Emerald supports up to 8 VFs per PF, but the physical
+ * function also needs one pool for basic networking.
+ */
+ pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT);
wx->driver_name = ngbe_driver_name;
ngbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &ngbe_netdev_ops;
@@ -744,6 +818,7 @@ static void ngbe_remove(struct pci_dev *pdev)
struct net_device *netdev;
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev,
@@ -803,6 +878,7 @@ static struct pci_driver ngbe_driver = {
.suspend = ngbe_suspend,
.resume = ngbe_resume,
.shutdown = ngbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(ngbe_driver);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index ea1d7e9a91f3..c63bb6e6f405 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -9,6 +9,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
@@ -70,6 +71,8 @@ static void ngbe_mac_link_down(struct phylink_config *config,
wx->speed = SPEED_UNKNOWN;
if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void ngbe_mac_link_up(struct phylink_config *config,
@@ -114,6 +117,8 @@ static void ngbe_mac_link_up(struct phylink_config *config,
wx->last_rx_ptp_check = jiffies;
if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static const struct phylink_mac_ops ngbe_mac_ops = {
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index 992adbb98c7d..bb74263f0498 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -73,12 +73,14 @@
#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11)
#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
+#define NGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define NGBE_PX_MISC_IEN_GPIO BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
NGBE_PX_MISC_IEN_DEV_RST | \
NGBE_PX_MISC_IEN_TIMESYNC | \
NGBE_PX_MISC_IEN_ETH_LK | \
NGBE_PX_MISC_IEN_INT_ERR | \
+ NGBE_PX_MISC_IC_VF_MBOX | \
NGBE_PX_MISC_IEN_GPIO)
/* Extended Interrupt Cause Read */
@@ -134,6 +136,7 @@
#define NGBE_MAX_RXD 8192
#define NGBE_MIN_RXD 128
+#define NGBE_MAX_VFS_DRV_LIMIT 7
extern char ngbe_driver_name[];
void ngbe_down(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index f74576fe7062..c757fa95e58e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -11,4 +11,5 @@ txgbe-objs := txgbe_main.o \
txgbe_phy.o \
txgbe_irq.o \
txgbe_fdir.o \
- txgbe_ethtool.o
+ txgbe_ethtool.o \
+ txgbe_aml.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
new file mode 100644
index 000000000000..7dbcf41750c1
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/phylink.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
+#include "txgbe_type.h"
+#include "txgbe_aml.h"
+#include "txgbe_hw.h"
+
+void txgbe_gpio_init_aml(struct wx *wx)
+{
+ u32 status;
+
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3);
+ wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2 | TXGBE_GPIOBIT_3);
+
+ status = rd32(wx, WX_GPIO_INTSTATUS);
+ for (int i = 0; i < 6; i++) {
+ if (status & BIT(i))
+ wr32(wx, WX_GPIO_EOI, BIT(i));
+ }
+}
+
+irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status;
+
+ wr32(wx, WX_GPIO_INTMASK, 0xFF);
+ status = rd32(wx, WX_GPIO_INTSTATUS);
+ if (status & TXGBE_GPIOBIT_2) {
+ set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+ wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2);
+ wx_service_event_schedule(wx);
+ }
+ if (status & TXGBE_GPIOBIT_3) {
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+ wx_service_event_schedule(wx);
+ wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_3);
+ }
+
+ wr32(wx, WX_GPIO_INTMASK, 0);
+ return IRQ_HANDLED;
+}
+
+int txgbe_test_hostif(struct wx *wx)
+{
+ struct txgbe_hic_ephy_getlink buffer;
+
+ if (wx->mac.type != wx_mac_aml)
+ return 0;
+
+ buffer.hdr.cmd = FW_PHY_GET_LINK_CMD;
+ buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_getlink) -
+ sizeof(struct wx_hic_hdr);
+ buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer)
+{
+ buffer->hdr.cmd = FW_READ_SFP_INFO_CMD;
+ buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) -
+ sizeof(struct wx_hic_hdr);
+ buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ return wx_host_interface_command(wx, (u32 *)buffer,
+ sizeof(struct txgbe_hic_i2c_read),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int duplex)
+{
+ struct txgbe_hic_ephy_setlink buffer;
+
+ buffer.hdr.cmd = FW_PHY_SET_LINK_CMD;
+ buffer.hdr.buf_len = sizeof(struct txgbe_hic_ephy_setlink) -
+ sizeof(struct wx_hic_hdr);
+ buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
+ switch (speed) {
+ case SPEED_25000:
+ buffer.speed = TXGBE_LINK_SPEED_25GB_FULL;
+ break;
+ case SPEED_10000:
+ buffer.speed = TXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ }
+
+ buffer.fec_mode = TXGBE_PHY_FEC_AUTO;
+ buffer.autoneg = autoneg;
+ buffer.duplex = duplex;
+
+ return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
+ WX_HI_COMMAND_TIMEOUT, true);
+}
+
+static void txgbe_get_link_capabilities(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+
+ if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces))
+ wx->adv_speed = SPEED_25000;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces))
+ wx->adv_speed = SPEED_10000;
+ else
+ wx->adv_speed = SPEED_UNKNOWN;
+
+ wx->adv_duplex = wx->adv_speed == SPEED_UNKNOWN ?
+ DUPLEX_HALF : DUPLEX_FULL;
+}
+
+static void txgbe_get_phy_link(struct wx *wx, int *speed)
+{
+ u32 status;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ if (!(status & TXGBE_CFG_PORT_ST_LINK_UP))
+ *speed = SPEED_UNKNOWN;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G)
+ *speed = SPEED_25000;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G)
+ *speed = SPEED_10000;
+ else
+ *speed = SPEED_UNKNOWN;
+}
+
+int txgbe_set_phy_link(struct wx *wx)
+{
+ int speed, err;
+ u32 gpio;
+
+ /* Check RX signal */
+ gpio = rd32(wx, WX_GPIO_EXT);
+ if (gpio & TXGBE_GPIOBIT_3)
+ return -ENODEV;
+
+ txgbe_get_link_capabilities(wx);
+ if (wx->adv_speed == SPEED_UNKNOWN)
+ return -ENODEV;
+
+ txgbe_get_phy_link(wx, &speed);
+ if (speed == wx->adv_speed)
+ return 0;
+
+ err = txgbe_set_phy_link_hostif(wx, wx->adv_speed, 0, wx->adv_duplex);
+ if (err) {
+ wx_err(wx, "Failed to setup link\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct txgbe *txgbe = wx->priv;
+
+ if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE |
+ TXGBE_SFF_25GBASEER_CAPABLE |
+ TXGBE_SFF_25GBASELR_CAPABLE)) {
+ phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
+ phylink_set(modes, 10000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
+ phylink_set(modes, 10000baseLR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+
+ if (phy_interface_empty(interfaces)) {
+ wx_err(wx, "unsupported SFP module\n");
+ return -EINVAL;
+ }
+
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+ phylink_set(modes, FIBRE);
+ txgbe->link_port = PORT_FIBRE;
+
+ if (!linkmode_equal(txgbe->sfp_support, modes)) {
+ linkmode_copy(txgbe->sfp_support, modes);
+ phy_interface_and(txgbe->sfp_interfaces,
+ wx->phylink_config.supported_interfaces,
+ interfaces);
+ linkmode_copy(txgbe->advertising, modes);
+
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+ }
+
+ return 0;
+}
+
+int txgbe_identify_sfp(struct wx *wx)
+{
+ struct txgbe_hic_i2c_read buffer;
+ struct txgbe_sfp_id *id;
+ int err = 0;
+ u32 gpio;
+
+ gpio = rd32(wx, WX_GPIO_EXT);
+ if (gpio & TXGBE_GPIOBIT_2)
+ return -ENODEV;
+
+ err = txgbe_identify_sfp_hostif(wx, &buffer);
+ if (err) {
+ wx_err(wx, "Failed to identify SFP module\n");
+ return err;
+ }
+
+ id = &buffer.id;
+ if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) {
+ wx_err(wx, "Invalid SFP module\n");
+ return -ENODEV;
+ }
+
+ err = txgbe_sfp_to_linkmodes(wx, id);
+ if (err)
+ return err;
+
+ if (gpio & TXGBE_GPIOBIT_3)
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+
+ return 0;
+}
+
+void txgbe_setup_link(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+
+ phy_interface_zero(txgbe->sfp_interfaces);
+ linkmode_zero(txgbe->sfp_support);
+
+ txgbe_identify_sfp(wx);
+}
+
+static void txgbe_get_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct wx *wx = phylink_to_wx(config);
+ int speed;
+
+ txgbe_get_phy_link(wx, &speed);
+ state->link = speed != SPEED_UNKNOWN;
+ state->speed = speed;
+ state->duplex = state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
+}
+
+static void txgbe_reconfig_mac(struct wx *wx)
+{
+ u32 wdg, fc;
+
+ wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ fc = rd32(wx, WX_MAC_RX_FLOW_CTRL);
+
+ wr32(wx, WX_MIS_RST, TXGBE_MIS_RST_MAC_RST(wx->bus.func));
+ /* wait for MAC reset complete */
+ usleep_range(1000, 1500);
+
+ wr32m(wx, TXGBE_MAC_MISC_CTL, TXGBE_MAC_MISC_CTL_LINK_STS_MOD,
+ TXGBE_MAC_MISC_CTL_LINK_BOTH);
+ wx_reset_mac(wx);
+
+ wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+ wr32(wx, WX_MAC_RX_FLOW_CTRL, fc);
+}
+
+static void txgbe_mac_link_up_aml(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct wx *wx = phylink_to_wx(config);
+ u32 txcfg;
+
+ wx_fc_enable(wx, tx_pause, rx_pause);
+
+ txgbe_reconfig_mac(wx);
+
+ txcfg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
+ txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
+
+ switch (speed) {
+ case SPEED_25000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ break;
+ case SPEED_10000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_10G;
+ break;
+ default:
+ break;
+ }
+
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
+ wr32(wx, TXGBE_AML_MAC_TX_CFG, txcfg | TXGBE_AML_MAC_TX_CFG_TE);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
+}
+
+static void txgbe_mac_link_down_aml(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct wx *wx = phylink_to_wx(config);
+
+ wr32m(wx, TXGBE_AML_MAC_TX_CFG, TXGBE_AML_MAC_TX_CFG_TE, 0);
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
+}
+
+static void txgbe_mac_config_aml(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static const struct phylink_mac_ops txgbe_mac_ops_aml = {
+ .mac_config = txgbe_mac_config_aml,
+ .mac_link_down = txgbe_mac_link_down_aml,
+ .mac_link_up = txgbe_mac_link_up_aml,
+};
+
+int txgbe_phylink_init_aml(struct txgbe *txgbe)
+{
+ struct phylink_link_state state;
+ struct phylink_config *config;
+ struct wx *wx = txgbe->wx;
+ phy_interface_t phy_mode;
+ struct phylink *phylink;
+ int err;
+
+ config = &wx->phylink_config;
+ config->dev = &wx->netdev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_25000FD | MAC_10000FD |
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ config->get_fixed_state = txgbe_get_link_state;
+
+ phy_mode = PHY_INTERFACE_MODE_25GBASER;
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
+
+ phylink = phylink_create(config, NULL, phy_mode, &txgbe_mac_ops_aml);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ state.speed = SPEED_25000;
+ state.duplex = DUPLEX_FULL;
+ err = phylink_set_fixed_link(phylink, &state);
+ if (err) {
+ wx_err(wx, "Failed to set fixed link\n");
+ return err;
+ }
+
+ wx->phylink = phylink;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
new file mode 100644
index 000000000000..25d4971ca0d9
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_AML_H_
+#define _TXGBE_AML_H_
+
+void txgbe_gpio_init_aml(struct wx *wx);
+irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data);
+int txgbe_test_hostif(struct wx *wx);
+int txgbe_set_phy_link(struct wx *wx);
+int txgbe_identify_sfp(struct wx *wx);
+void txgbe_setup_link(struct wx *wx);
+int txgbe_phylink_init_aml(struct txgbe *txgbe);
+
+#endif /* _TXGBE_AML_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index 78999d484f18..a4753402660e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -12,6 +12,31 @@
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
+int txgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct txgbe *txgbe = wx->priv;
+ int err;
+
+ if (wx->mac.type == wx_mac_aml40)
+ return -EOPNOTSUPP;
+
+ err = wx_get_link_ksettings(netdev, cmd);
+ if (err)
+ return err;
+
+ if (wx->mac.type == wx_mac_sp)
+ return 0;
+
+ cmd->base.port = txgbe->link_port;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support);
+ linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);
+
+ return 0;
+}
+
static int txgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -342,12 +367,19 @@ static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
queue = TXGBE_RDB_FDIR_DROP_QUEUE;
} else {
u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
- if (ring >= wx->num_rx_queues)
+ if (!vf && ring >= wx->num_rx_queues)
+ return -EINVAL;
+ else if (vf && (vf > wx->num_vfs ||
+ ring >= wx->num_rx_queues_per_pool))
return -EINVAL;
/* Map the ring onto the absolute queue index */
- queue = wx->rx_ring[ring]->reg_idx;
+ if (!vf)
+ queue = wx->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) * wx->num_rx_queues_per_pool) + ring;
}
/* Don't allow indexes to exist outside of available space */
@@ -510,7 +542,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.get_drvinfo = wx_get_drvinfo,
.nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = wx_get_link_ksettings,
+ .get_link_ksettings = txgbe_get_link_ksettings,
.set_link_ksettings = wx_set_link_ksettings,
.get_sset_count = wx_get_sset_count,
.get_strings = wx_get_strings,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
index ace1b3571012..66dbc8ec1bb6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
@@ -4,6 +4,8 @@
#ifndef _TXGBE_ETHTOOL_H_
#define _TXGBE_ETHTOOL_H_
+int txgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
void txgbe_set_ethtool_ops(struct net_device *netdev);
#endif /* _TXGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
index ef50efbaec0f..a84010828551 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
@@ -307,6 +307,7 @@ void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype)
int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
{
u32 fdirm = 0, fdirtcpm = 0, flex = 0;
+ int index, offset;
/* Program the relevant mask registers. If src/dst_port or src/dst_addr
* are zero, then assume a full mask for that field. Also assume that
@@ -352,15 +353,17 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
- flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
- flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ index = VMDQ_P(0) / 4;
+ offset = VMDQ_P(0) % 4;
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
+ flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
- TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
case 0x0000:
/* Mask Flex Bytes */
- flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK;
+ flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK << (offset * 8);
break;
case 0xFFFF:
break;
@@ -368,7 +371,7 @@ int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
wx_err(wx, "Error on flexible byte mask\n");
return -EINVAL;
}
- wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = ntohs(input_mask->formatted.dst_port);
@@ -516,14 +519,16 @@ static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl)
static void txgbe_init_fdir_signature(struct wx *wx)
{
u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
+ int index = VMDQ_P(0) / 4;
+ int offset = VMDQ_P(0) % 4;
u32 flex = 0;
- flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
- flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index));
+ flex &= ~(TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 << (offset * 8));
flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
- TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
- wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)) << (offset * 8);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(index), flex);
/* Continue setup of fdirctrl register bits:
* Move the flexible bytes to use the ethertype - shift 6 words
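
The index/offset arithmetic in both hunks above packs four 8-bit flex-byte configurations into each 32-bit FLEX_CFG register: a pool selects register VMDQ_P(0) / 4 and byte lane VMDQ_P(0) % 4. A standalone sketch of the read-modify-write, with illustrative names rather than the driver's:

    #include <stdint.h>

    #define FLEX_FIELD_MASK 0xffu /* one 8-bit config per byte lane */

    /* Update the flex-byte config for one pool without disturbing the other
     * three lanes sharing the same 32-bit register; the register to read and
     * write back is pool / 4.
     */
    static uint32_t flex_cfg_update(uint32_t reg, unsigned int pool, uint8_t cfg)
    {
        unsigned int shift = (pool % 4) * 8; /* byte lane within the register */

        reg &= ~((uint32_t)FLEX_FIELD_MASK << shift); /* clear the old field */
        reg |= (uint32_t)cfg << shift;                /* program the new one */
        return reg;
    }
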
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 4b9921b7bb11..e551ae0e2069 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -99,9 +99,15 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
}
local_buffer = eeprom_ptrs;
- for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
+ for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++) {
+ if (wx->mac.type == wx_mac_aml) {
+ if (i >= TXGBE_EEPROM_I2C_SRART_PTR &&
+ i < TXGBE_EEPROM_I2C_END_PTR)
+ local_buffer[i] = 0xffff;
+ }
if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
+ }
kvfree(eeprom_ptrs);
@@ -182,7 +188,7 @@ int txgbe_reset_hw(struct wx *wx)
if (status != 0)
return status;
- if (wx->media_type != sp_media_copper) {
+ if (wx->media_type != wx_media_copper) {
u32 val;
val = WX_MIS_RST_LAN_RST(wx->bus.func);
@@ -212,7 +218,7 @@ int txgbe_reset_hw(struct wx *wx)
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
- wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
wx_init_rx_addrs(wx);
pci_set_master(wx->pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 8658a51ee810..20b9a28bcb55 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -6,10 +6,13 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
+#include "txgbe_aml.h"
/**
* txgbe_irq_enable - Enable default interrupt generation settings
@@ -18,7 +21,14 @@
**/
void txgbe_irq_enable(struct wx *wx, bool queues)
{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+ u32 misc_ien = TXGBE_PX_MISC_IEN_MASK;
+
+ if (wx->mac.type == wx_mac_aml) {
+ misc_ien |= TXGBE_PX_MISC_GPIO;
+ txgbe_gpio_init_aml(wx);
+ }
+
+ wr32(wx, WX_PX_MISC_IEN, misc_ien);
/* unmask interrupt */
wx_intr_enable(wx, TXGBE_INTR_MISC);
@@ -80,6 +90,14 @@ static int txgbe_request_link_irq(struct txgbe *txgbe)
IRQF_ONESHOT, "txgbe-link-irq", txgbe);
}
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler_aml,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
static const struct irq_chip txgbe_irq_chip = {
.name = "txgbe-misc-irq",
};
@@ -109,8 +127,15 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
struct wx *wx = txgbe->wx;
u32 eicr;
- if (wx->pdev->msix_enabled)
+ if (wx->pdev->msix_enabled) {
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ txgbe->eicr = eicr;
+ if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) {
+ wx_msg_task(txgbe->wx);
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ }
return IRQ_WAKE_THREAD;
+ }
eicr = wx_misc_isb(wx, WX_ISB_VEC0);
if (!eicr) {
@@ -129,6 +154,8 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
q_vector = wx->q_vector[0];
napi_schedule_irqoff(&q_vector->napi);
+ txgbe->eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
return IRQ_WAKE_THREAD;
}
@@ -140,13 +167,22 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
unsigned int sub_irq;
u32 eicr;
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ eicr = txgbe->eicr;
if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
TXGBE_PX_MISC_ETH_AN)) {
sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
handle_nested_irq(sub_irq);
nhandled++;
}
+ if (eicr & TXGBE_PX_MISC_GPIO) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+ if (unlikely(eicr & TXGBE_PX_MISC_IC_TIMESYNC)) {
+ wx_ptp_check_pps_event(wx);
+ nhandled++;
+ }
wx_intr_enable(wx, TXGBE_INTR_MISC);
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
@@ -166,9 +202,12 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
- if (txgbe->wx->mac.type == wx_mac_aml)
+ if (txgbe->wx->mac.type == wx_mac_aml40)
return;
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ free_irq(txgbe->gpio_irq, txgbe);
+
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
@@ -180,12 +219,12 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
- if (wx->mac.type == wx_mac_aml)
+ if (wx->mac.type == wx_mac_aml40)
goto skip_sp_irq;
- txgbe->misc.nirqs = 1;
- txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
- &txgbe_misc_irq_domain_ops, txgbe);
+ txgbe->misc.nirqs = TXGBE_IRQ_MAX;
+ txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
if (!txgbe->misc.domain)
return -ENOMEM;
@@ -212,11 +251,20 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto free_msic_irq;
+ if (wx->mac.type == wx_mac_sp)
+ goto skip_sp_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
+ if (err)
+ goto free_link_irq;
+
skip_sp_irq:
wx->misc_irq_domain = true;
return 0;
+free_link_irq:
+ free_irq(txgbe->link_irq, txgbe);
free_msic_irq:
free_irq(txgbe->misc.irq, txgbe);
del_misc_irq:
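
The handler split above depends on latching the cause register exactly once in hard-IRQ context and letting the thread consume the cached copy, since a second read could clear or miss bits raised in between. A minimal sketch of the pattern, with illustrative names and bit positions rather than the driver's:

    #include <linux/bits.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>

    #define CAUSE_LINK BIT(18) /* illustrative bit positions */
    #define CAUSE_GPIO BIT(26)

    struct my_priv {
        void __iomem *isb; /* interrupt status block, assumed mapped */
        u32 cached_cause;
    };

    static irqreturn_t misc_irq_handle(int irq, void *data)
    {
        struct my_priv *priv = data;

        priv->cached_cause = readl(priv->isb); /* latch once */
        return IRQ_WAKE_THREAD;
    }

    static irqreturn_t misc_irq_thread(int irq, void *data)
    {
        struct my_priv *priv = data;
        u32 cause = priv->cached_cause; /* no re-read here */
        int handled = 0;

        if (cause & CAUSE_LINK)
            handled++; /* dispatch the nested link handler here */
        if (cause & CAUSE_GPIO)
            handled++; /* dispatch the nested GPIO handler here */

        return handled ? IRQ_HANDLED : IRQ_NONE;
    }
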
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 38206a46693b..f3d2778b8e35 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/phylink.h>
+#include <net/udp_tunnel.h>
#include <net/ip.h>
#include <linux/if_vlan.h>
@@ -15,9 +16,12 @@
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_mbx.h"
+#include "../libwx/wx_sriov.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_aml.h"
#include "txgbe_irq.h"
#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
@@ -85,9 +89,62 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
+static void txgbe_sfp_detection_subtask(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags))
+ return;
+
+ /* wait for SFP module ready */
+ msleep(200);
+
+ err = txgbe_identify_sfp(wx);
+ if (err)
+ return;
+
+ clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+}
+
+static void txgbe_link_config_subtask(struct wx *wx)
+{
+ int err;
+
+ if (!test_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags))
+ return;
+
+ err = txgbe_set_phy_link(wx);
+ if (err)
+ return;
+
+ clear_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
+}
+
+/**
+ * txgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void txgbe_service_task(struct work_struct *work)
+{
+ struct wx *wx = container_of(work, struct wx, service_task);
+
+ txgbe_sfp_detection_subtask(wx);
+ txgbe_link_config_subtask(wx);
+
+ wx_service_event_complete(wx);
+}
+
+static void txgbe_init_service(struct wx *wx)
+{
+ timer_setup(&wx->service_timer, wx_service_timer, 0);
+ INIT_WORK(&wx->service_task, txgbe_service_task);
+ clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
+}
+
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ u32 reg;
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -96,17 +153,26 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- if (wx->mac.type == wx_mac_aml) {
- u32 reg;
-
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
- reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
wr32(wx, WX_MAC_TX_CFG, reg);
txgbe_enable_sec_tx_path(wx);
netif_carrier_on(wx->netdev);
- } else {
+ break;
+ case wx_mac_aml:
+ /* Enable TX laser */
+ wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, 0);
+ txgbe_setup_link(wx);
+ phylink_start(wx->phylink);
+ break;
+ case wx_mac_sp:
phylink_start(wx->phylink);
+ break;
+ default:
+ break;
}
/* clear any pending interrupts, may auto mask */
@@ -117,6 +183,13 @@ static void txgbe_up_complete(struct wx *wx)
/* enable transmits */
netif_tx_start_all_queues(netdev);
+ mod_timer(&wx->service_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD,
+ WX_CFG_PORT_CTL_PFRSTD);
+ /* update rx/tx settings for all active VFs */
+ wx_set_all_vfs(wx);
}
static void txgbe_reset(struct wx *wx)
@@ -159,12 +232,24 @@ static void txgbe_disable_device(struct wx *wx)
wx_irq_disable(wx);
wx_napi_disable_all(wx);
+ timer_delete_sync(&wx->service_timer);
+
if (wx->bus.func < 2)
wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
else
wx_err(wx, "%s: invalid bus lan id %d\n",
__func__, wx->bus.func);
+ if (wx->num_vfs) {
+ /* Clear EITR Select mapping */
+ wr32(wx, WX_PX_ITRSEL, 0);
+ /* Mark all the VFs as inactive */
+ for (i = 0; i < wx->num_vfs; i++)
+ wx->vfinfo[i].clear_to_send = 0;
+ /* update rx/tx settings for all active VFs */
+ wx_set_all_vfs(wx);
+ }
+
if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable mac transmitter */
@@ -188,10 +273,22 @@ void txgbe_down(struct wx *wx)
{
txgbe_disable_device(wx);
txgbe_reset(wx);
- if (wx->mac.type == wx_mac_aml)
+
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
netif_carrier_off(wx->netdev);
- else
+ break;
+ case wx_mac_aml:
+ phylink_stop(wx->phylink);
+ /* Disable TX laser */
+ wr32m(wx, WX_GPIO_DR, TXGBE_GPIOBIT_1, TXGBE_GPIOBIT_1);
+ break;
+ case wx_mac_sp:
phylink_stop(wx->phylink);
+ break;
+ default:
+ break;
+ }
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
@@ -221,9 +318,11 @@ static void txgbe_init_type_code(struct wx *wx)
case TXGBE_DEV_ID_AML5110:
case TXGBE_DEV_ID_AML5025:
case TXGBE_DEV_ID_AML5125:
+ wx->mac.type = wx_mac_aml;
+ break;
case TXGBE_DEV_ID_AML5040:
case TXGBE_DEV_ID_AML5140:
- wx->mac.type = wx_mac_aml;
+ wx->mac.type = wx_mac_aml40;
break;
default:
wx->mac.type = wx_mac_unknown;
@@ -232,25 +331,25 @@ static void txgbe_init_type_code(struct wx *wx)
switch (device_type) {
case TXGBE_ID_SFP:
- wx->media_type = sp_media_fiber;
+ wx->media_type = wx_media_fiber;
break;
case TXGBE_ID_XAUI:
case TXGBE_ID_SGMII:
- wx->media_type = sp_media_copper;
+ wx->media_type = wx_media_copper;
break;
case TXGBE_ID_KR_KX_KX4:
case TXGBE_ID_MAC_XAUI:
case TXGBE_ID_MAC_SGMII:
- wx->media_type = sp_media_backplane;
+ wx->media_type = wx_media_backplane;
break;
case TXGBE_ID_SFI_XAUI:
if (wx->bus.func == 0)
- wx->media_type = sp_media_fiber;
+ wx->media_type = wx_media_fiber;
else
- wx->media_type = sp_media_copper;
+ wx->media_type = wx_media_copper;
break;
default:
- wx->media_type = sp_media_unknown;
+ wx->media_type = wx_media_unknown;
break;
}
}
@@ -264,13 +363,13 @@ static int txgbe_sw_init(struct wx *wx)
u16 msix_count = 0;
int err;
- wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
- wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
- wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
- wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
- wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE;
- wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
- wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
+ wx->mac.num_rar_entries = TXGBE_RAR_ENTRIES;
+ wx->mac.max_tx_queues = TXGBE_MAX_TXQ;
+ wx->mac.max_rx_queues = TXGBE_MAX_RXQ;
+ wx->mac.mcft_size = TXGBE_MC_TBL_SIZE;
+ wx->mac.vft_size = TXGBE_VFT_TBL_SIZE;
+ wx->mac.rx_pb_size = TXGBE_RX_PB_SIZE;
+ wx->mac.tx_pb_size = TXGBE_TDB_PB_SZ;
/* PCI config space info */
err = wx_sw_init(wx);
@@ -299,6 +398,7 @@ static int txgbe_sw_init(struct wx *wx)
wx->configure_fdir = txgbe_configure_fdir;
set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+ set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
@@ -307,17 +407,21 @@ static int txgbe_sw_init(struct wx *wx)
/* set default ring sizes */
wx->tx_ring_count = TXGBE_DEFAULT_TXD;
wx->rx_ring_count = TXGBE_DEFAULT_RXD;
+ wx->mbx.size = WX_VXMAILBOX_SIZE;
/* set default work limits */
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->setup_tc = txgbe_setup_tc;
wx->do_reset = txgbe_do_reset;
+ set_bit(0, &wx->fwd_bitmask);
switch (wx->mac.type) {
case wx_mac_sp:
break;
case wx_mac_aml:
+ case wx_mac_aml40:
set_bit(WX_FLAG_SWFW_RING, wx->flags);
wx->swfw_index = 0;
break;
@@ -516,6 +620,39 @@ void txgbe_do_reset(struct net_device *netdev)
txgbe_reset(wx);
}
+static int txgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct udp_tunnel_info ti;
+
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ switch (ti.type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ wr32(wx, TXGBE_CFG_VXLAN, ntohs(ti.port));
+ break;
+ case UDP_TUNNEL_TYPE_VXLAN_GPE:
+ wr32(wx, TXGBE_CFG_VXLAN_GPE, ntohs(ti.port));
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ wr32(wx, TXGBE_CFG_GENEVE, ntohs(ti.port));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct udp_tunnel_nic_info txgbe_udp_tunnels = {
+ .sync_table = txgbe_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -524,6 +661,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
.ndo_fix_features = wx_fix_features,
+ .ndo_features_check = wx_features_check,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
@@ -604,9 +742,14 @@ static int txgbe_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ /* The Sapphire supports up to 63 VFs per PF, but the physical
+ * function also needs one pool for basic networking.
+ */
+ pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT);
wx->driver_name = txgbe_driver_name;
txgbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &txgbe_netdev_ops;
+ netdev->udp_tunnel_nic_info = &txgbe_udp_tunnels;
/* setup the private structure */
err = txgbe_sw_init(wx);
@@ -652,6 +795,7 @@ static int txgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_HIGHDMA;
netdev->hw_features |= NETIF_F_GRO;
netdev->features |= NETIF_F_GRO;
+ netdev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -673,9 +817,11 @@ static int txgbe_probe(struct pci_dev *pdev,
eth_hw_addr_set(netdev, wx->mac.perm_addr);
wx_mac_set_default_filter(wx, wx->mac.perm_addr);
+ txgbe_init_service(wx);
+
err = wx_init_interrupt_scheme(wx);
if (err)
- goto err_free_mac_table;
+ goto err_cancel_service;
/* Save off EEPROM version number and Option Rom version which
* together make a unique identify for the eeprom
@@ -718,6 +864,13 @@ static int txgbe_probe(struct pci_dev *pdev,
if (etrack_id < 0x20010)
dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");
+ err = txgbe_test_hostif(wx);
+ if (err != 0) {
+ dev_err(&pdev->dev, "Mismatched Firmware version\n");
+ err = -EIO;
+ goto err_release_hw;
+ }
+
txgbe = devm_kzalloc(&pdev->dev, sizeof(*txgbe), GFP_KERNEL);
if (!txgbe) {
err = -ENOMEM;
@@ -768,6 +921,9 @@ err_free_misc_irq:
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
+err_cancel_service:
+ timer_delete_sync(&wx->service_timer);
+ cancel_work_sync(&wx->service_task);
err_free_mac_table:
kfree(wx->rss_key);
kfree(wx->mac_table);
@@ -794,7 +950,10 @@ static void txgbe_remove(struct pci_dev *pdev)
struct txgbe *txgbe = wx->priv;
struct net_device *netdev;
+ cancel_work_sync(&wx->service_task);
+
netdev = wx->netdev;
+ wx_disable_sriov(wx);
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
@@ -817,11 +976,12 @@ static struct pci_driver txgbe_driver = {
.probe = txgbe_probe,
.remove = txgbe_remove,
.shutdown = txgbe_shutdown,
+ .sriov_configure = wx_pci_sriov_configure,
};
module_pci_driver(txgbe_driver);
MODULE_DEVICE_TABLE(pci, txgbe_pci_tbl);
MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@trustnetic.com>");
-MODULE_DESCRIPTION("WangXun(R) 10 Gigabit PCI Express Network Driver");
+MODULE_DESCRIPTION("WangXun(R) 10/25/40 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
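
The probe path above pairs a periodic timer with a work item so the SFP and link subtasks run in process context, where they are allowed to sleep (the 200 ms module settle wait, firmware host interface commands). A sketch of that timer-plus-work skeleton, with illustrative names and assuming the from_timer() helper:

    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct my_adapter {
        struct timer_list service_timer;
        struct work_struct service_task;
    };

    static void my_service_timer(struct timer_list *t)
    {
        struct my_adapter *adap = from_timer(adap, t, service_timer);

        mod_timer(&adap->service_timer, jiffies + HZ); /* re-arm */
        schedule_work(&adap->service_task);            /* defer to process context */
    }

    static void my_service_task(struct work_struct *work)
    {
        struct my_adapter *adap =
            container_of(work, struct my_adapter, service_task);

        /* flag-gated subtasks run here and clear their bit on success */
        (void)adap;
    }
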
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 85f022ceef4f..03f1b9bc604d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -16,8 +16,11 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_ptp.h"
+#include "../libwx/wx_sriov.h"
+#include "../libwx/wx_mbx.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
+#include "txgbe_aml.h"
#include "txgbe_phy.h"
#include "txgbe_hw.h"
@@ -163,7 +166,7 @@ static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *confi
struct wx *wx = phylink_to_wx(config);
struct txgbe *txgbe = wx->priv;
- if (wx->media_type != sp_media_copper)
+ if (wx->media_type != wx_media_copper)
return txgbe->pcs;
return NULL;
@@ -184,6 +187,8 @@ static void txgbe_mac_link_down(struct phylink_config *config,
wx->speed = SPEED_UNKNOWN;
if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going down */
+ wx_ping_all_vfs_with_link_status(wx, false);
}
static void txgbe_mac_link_up(struct phylink_config *config,
@@ -225,6 +230,8 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wx->last_rx_ptp_check = jiffies;
if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
wx_ptp_reset_cyclecounter(wx);
+ /* ping all the active vfs to let them know we are going up */
+ wx_ping_all_vfs_with_link_status(wx, true);
}
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
@@ -272,7 +279,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
- if (wx->media_type == sp_media_copper) {
+ if (wx->media_type == wx_media_copper) {
phy_mode = PHY_INTERFACE_MODE_XAUI;
__set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
} else {
@@ -312,7 +319,10 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data)
status = rd32(wx, TXGBE_CFG_PORT_ST);
up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
- phylink_pcs_change(txgbe->pcs, up);
+ if (txgbe->pcs)
+ phylink_pcs_change(txgbe->pcs, up);
+ else
+ phylink_mac_change(wx->phylink, up);
return IRQ_HANDLED;
}
@@ -567,11 +577,18 @@ int txgbe_init_phy(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int ret;
- if (wx->mac.type == wx_mac_aml)
+ switch (wx->mac.type) {
+ case wx_mac_aml40:
return 0;
-
- if (txgbe->wx->media_type == sp_media_copper)
- return txgbe_ext_phy_init(txgbe);
+ case wx_mac_aml:
+ return txgbe_phylink_init_aml(txgbe);
+ case wx_mac_sp:
+ if (wx->media_type == wx_media_copper)
+ return txgbe_ext_phy_init(txgbe);
+ break;
+ default:
+ break;
+ }
ret = txgbe_swnodes_register(txgbe);
if (ret) {
@@ -634,13 +651,21 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
- if (txgbe->wx->mac.type == wx_mac_aml)
+ switch (txgbe->wx->mac.type) {
+ case wx_mac_aml40:
return;
-
- if (txgbe->wx->media_type == sp_media_copper) {
- phylink_disconnect_phy(txgbe->wx->phylink);
+ case wx_mac_aml:
phylink_destroy(txgbe->wx->phylink);
return;
+ case wx_mac_sp:
+ if (txgbe->wx->media_type == wx_media_copper) {
+ phylink_disconnect_phy(txgbe->wx->phylink);
+ phylink_destroy(txgbe->wx->phylink);
+ return;
+ }
+ break;
+ default:
+ break;
}
platform_device_unregister(txgbe->sfp_dev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 3938985355ed..a32b19d71ea2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -8,4 +8,4 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
-#endif /* _TXGBE_NODE_H_ */
+#endif /* _TXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 9c1c26234cad..42ec815159e8 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -6,6 +6,8 @@
#include <linux/property.h>
#include <linux/irq.h>
+#include <linux/phy.h>
+#include "../libwx/wx_type.h"
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
@@ -50,6 +52,8 @@
/**************** SP Registers ****************************/
/* chip control Registers */
+#define TXGBE_MIS_RST 0x1000C
+#define TXGBE_MIS_RST_MAC_RST(_i) BIT(20 - (_i) * 3)
#define TXGBE_MIS_PRB_CTL 0x10010
#define TXGBE_MIS_PRB_CTL_LAN_UP(_i) BIT(1 - (_i))
/* FMGR Registers */
@@ -62,6 +66,11 @@
#define TXGBE_TS_CTL 0x10300
#define TXGBE_TS_CTL_EVAL_MD BIT(31)
+/* MAC Misc Registers */
+#define TXGBE_MAC_MISC_CTL 0x11F00
+#define TXGBE_MAC_MISC_CTL_LINK_STS_MOD BIT(0)
+#define TXGBE_MAC_MISC_CTL_LINK_PCS FIELD_PREP(BIT(0), 0)
+#define TXGBE_MAC_MISC_CTL_LINK_BOTH FIELD_PREP(BIT(0), 1)
/* GPIO register bit */
#define TXGBE_GPIOBIT_0 BIT(0) /* I:tx fault */
#define TXGBE_GPIOBIT_1 BIT(1) /* O:tx disabled */
@@ -73,19 +82,27 @@
/* Extended Interrupt Enable Set */
#define TXGBE_PX_MISC_ETH_LKDN BIT(8)
#define TXGBE_PX_MISC_DEV_RST BIT(10)
+#define TXGBE_PX_MISC_IC_TIMESYNC BIT(11)
#define TXGBE_PX_MISC_ETH_EVENT BIT(17)
#define TXGBE_PX_MISC_ETH_LK BIT(18)
#define TXGBE_PX_MISC_ETH_AN BIT(19)
#define TXGBE_PX_MISC_INT_ERR BIT(20)
+#define TXGBE_PX_MISC_IC_VF_MBOX BIT(23)
#define TXGBE_PX_MISC_GPIO BIT(26)
#define TXGBE_PX_MISC_IEN_MASK \
(TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \
TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \
- TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR)
+ TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \
+ TXGBE_PX_MISC_IC_VF_MBOX | TXGBE_PX_MISC_IC_TIMESYNC)
/* Port cfg registers */
#define TXGBE_CFG_PORT_ST 0x14404
#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0)
+#define TXGBE_CFG_PORT_ST_LINK_AML_25G BIT(3)
+#define TXGBE_CFG_PORT_ST_LINK_AML_10G BIT(4)
+#define TXGBE_CFG_VXLAN 0x14410
+#define TXGBE_CFG_VXLAN_GPE 0x14414
+#define TXGBE_CFG_GENEVE 0x14418
/* I2C registers */
#define TXGBE_I2C_BASE 0x14900
@@ -146,8 +163,11 @@
/*************************** Amber Lite Registers ****************************/
#define TXGBE_PX_PF_BME 0x4B8
#define TXGBE_AML_MAC_TX_CFG 0x11000
+#define TXGBE_AML_MAC_TX_CFG_TE BIT(0)
#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27)
-#define TXGBE_AML_MAC_TX_CFG_SPEED_25G BIT(28)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_40G FIELD_PREP(GENMASK(30, 27), 0)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_25G FIELD_PREP(GENMASK(30, 27), 2)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_10G FIELD_PREP(GENMASK(30, 27), 8)
#define TXGBE_RDM_RSC_CTL 0x1200C
#define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7)
@@ -158,6 +178,8 @@
#define TXGBE_EEPROM_VERSION_L 0x1D
#define TXGBE_EEPROM_VERSION_H 0x1E
#define TXGBE_ISCSI_BOOT_CONFIG 0x07
+#define TXGBE_EEPROM_I2C_SRART_PTR 0x580
+#define TXGBE_EEPROM_I2C_END_PTR 0x800
#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
@@ -166,13 +188,15 @@
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
-#define TXGBE_SP_MAX_TX_QUEUES 128
-#define TXGBE_SP_MAX_RX_QUEUES 128
-#define TXGBE_SP_RAR_ENTRIES 128
-#define TXGBE_SP_MC_TBL_SIZE 128
-#define TXGBE_SP_VFT_TBL_SIZE 128
-#define TXGBE_SP_RX_PB_SIZE 512
-#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_MAX_TXQ 128
+#define TXGBE_MAX_RXQ 128
+#define TXGBE_RAR_ENTRIES 128
+#define TXGBE_MC_TBL_SIZE 128
+#define TXGBE_VFT_TBL_SIZE 128
+#define TXGBE_RX_PB_SIZE 512
+#define TXGBE_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+
+#define TXGBE_MAX_VFS_DRV_LIMIT 63
#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20
@@ -263,7 +287,7 @@ struct txgbe_fdir_filter {
struct hlist_node fdir_node;
union txgbe_atr_input filter;
u16 sw_idx;
- u16 action;
+ u64 action;
};
/* TX/RX descriptor defines */
@@ -290,6 +314,72 @@ void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
void txgbe_do_reset(struct net_device *netdev);
+#define TXGBE_LINK_SPEED_10GB_FULL 4
+#define TXGBE_LINK_SPEED_25GB_FULL 0x10
+
+#define TXGBE_SFF_IDENTIFIER_SFP 0x3
+#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT 0x4
+#define TXGBE_SFF_FCPI4_LIMITING 0x3
+#define TXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define TXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define TXGBE_SFF_25GBASESR_CAPABLE 0x2
+#define TXGBE_SFF_25GBASELR_CAPABLE 0x3
+#define TXGBE_SFF_25GBASEER_CAPABLE 0x4
+#define TXGBE_SFF_25GBASECR_91FEC 0xB
+#define TXGBE_SFF_25GBASECR_74FEC 0xC
+#define TXGBE_SFF_25GBASECR_NOFEC 0xD
+
+#define TXGBE_PHY_FEC_RS BIT(0)
+#define TXGBE_PHY_FEC_BASER BIT(1)
+#define TXGBE_PHY_FEC_OFF BIT(2)
+#define TXGBE_PHY_FEC_AUTO (TXGBE_PHY_FEC_OFF | \
+ TXGBE_PHY_FEC_BASER |\
+ TXGBE_PHY_FEC_RS)
+
+#define FW_PHY_GET_LINK_CMD 0xC0
+#define FW_PHY_SET_LINK_CMD 0xC1
+#define FW_READ_SFP_INFO_CMD 0xC5
+
+struct txgbe_sfp_id {
+ u8 identifier; /* A0H 0x00 */
+ u8 com_1g_code; /* A0H 0x06 */
+ u8 com_10g_code; /* A0H 0x03 */
+ u8 com_25g_code; /* A0H 0x24 */
+ u8 cable_spec; /* A0H 0x3C */
+ u8 cable_tech; /* A0H 0x08 */
+ u8 vendor_oui0; /* A0H 0x25 */
+ u8 vendor_oui1; /* A0H 0x26 */
+ u8 vendor_oui2; /* A0H 0x27 */
+ u8 reserved[3];
+};
+
+struct txgbe_hic_i2c_read {
+ struct wx_hic_hdr hdr;
+ struct txgbe_sfp_id id;
+};
+
+struct txgbe_hic_ephy_setlink {
+ struct wx_hic_hdr hdr;
+ u8 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 fec_mode;
+ u8 resv[4];
+};
+
+struct txgbe_hic_ephy_getlink {
+ struct wx_hic_hdr hdr;
+ u8 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 flow_ctl;
+ u8 power;
+ u8 fec_mode;
+ u8 resv[6];
+};
+
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
.name = _NAME, \
@@ -327,6 +417,7 @@ struct txgbe_nodes {
enum txgbe_misc_irqs {
TXGBE_IRQ_LINK = 0,
+ TXGBE_IRQ_GPIO,
TXGBE_IRQ_MAX
};
@@ -348,12 +439,19 @@ struct txgbe {
struct clk *clk;
struct gpio_chip *gpio;
unsigned int link_irq;
+ unsigned int gpio_irq;
+ u32 eicr;
/* flow director */
struct hlist_head fdir_filter_list;
union txgbe_atr_input fdir_mask;
int fdir_filter_count;
spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
+
+ DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ u8 link_port;
};
#endif /* _TXGBE_TYPE_H_ */
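
The new speed encodings above are plain FIELD_PREP() values over the same GENMASK(30, 27) field, which keeps encoding and decoding symmetric. A small sketch with an illustrative macro name:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define MAC_TX_CFG_SPEED GENMASK(30, 27)

    static u32 set_speed_sel(u32 cfg, u32 sel)
    {
        cfg &= ~MAC_TX_CFG_SPEED; /* clear the field first */
        return cfg | FIELD_PREP(MAC_TX_CFG_SPEED, sel);
    }

    static u32 get_speed_sel(u32 cfg)
    {
        return FIELD_GET(MAC_TX_CFG_SPEED, cfg); /* 0 = 40G, 2 = 25G, 8 = 10G */
    }
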
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 054abf283ab3..6011d7eae0c7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -880,7 +880,7 @@ static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
dev_consume_skb_any(skbuf_dma->skb);
netif_txq_completed_wake(txq, 1, len,
CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
- 2 * MAX_SKB_FRAGS);
+ 2);
}
/**
@@ -914,7 +914,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
dma_dev = lp->tx_chan->device;
sg_len = skb_shinfo(skb)->nr_frags + 1;
- if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
+ if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
netif_stop_queue(ndev);
if (net_ratelimit())
netdev_warn(ndev, "TX ring unexpectedly full\n");
@@ -964,7 +964,7 @@ axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
txq = skb_get_tx_queue(lp->ndev, skb);
netdev_tx_sent_queue(txq, skb->len);
netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
- MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
+ 1, 2);
dmaengine_submit(dma_tx_desc);
dma_async_issue_pending(lp->tx_chan);
@@ -2980,7 +2980,7 @@ static int axienet_probe(struct platform_device *pdev)
}
}
if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
- dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
ret = -EINVAL;
goto cleanup_clk;
}
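
The threshold change in the dmaengine TX path reflects that a whole scatter-gather list now occupies a single ring slot, so the queue stops once only one free slot (or fewer) remains and wakes when at least two are free. A sketch of the CIRC_SPACE() accounting, with an illustrative ring size:

    #include <linux/circ_buf.h>

    #define TX_RING_SIZE 128 /* illustrative; must be a power of two */

    /* one ring slot per packet, regardless of the number of fragments */
    static bool tx_ring_full(int head, int tail)
    {
        return CIRC_SPACE(head, tail, TX_RING_SIZE) <= 1;
    }
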
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index a2ab1c150822..e1e7f65553e7 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -394,16 +394,20 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
-static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+static int ixp4xx_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config cfg;
struct ixp46x_ts_regs *regs;
struct port *port = netdev_priv(netdev);
int ret;
int ch;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
+ if (!cpu_is_ixp46x())
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
if (ret)
@@ -412,10 +416,10 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
ch = PORT2CHANNEL(port);
regs = port->timesync_regs;
- if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
port->hwts_rx_en = 0;
break;
@@ -431,39 +435,45 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
return -ERANGE;
}
- port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
+ port->hwts_tx_en = cfg->tx_type == HWTSTAMP_TX_ON;
/* Clear out any old time stamps. */
__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
&regs->channel[ch].ch_event);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
-static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+static int ixp4xx_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg)
{
- struct hwtstamp_config cfg;
struct port *port = netdev_priv(netdev);
- cfg.flags = 0;
- cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ if (!cpu_is_ixp46x())
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ cfg->flags = 0;
+ cfg->tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (port->hwts_rx_en) {
case 0:
- cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ cfg->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case PTP_SLAVE_MODE:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case PTP_MASTER_MODE:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
@@ -985,21 +995,6 @@ static void eth_set_mcast_list(struct net_device *dev)
}
-static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
-{
- if (!netif_running(dev))
- return -EINVAL;
-
- if (cpu_is_ixp46x()) {
- if (cmd == SIOCSHWTSTAMP)
- return hwtstamp_set(dev, req);
- if (cmd == SIOCGHWTSTAMP)
- return hwtstamp_get(dev, req);
- }
-
- return phy_mii_ioctl(dev->phydev, req, cmd);
-}
-
/* ethtool support */
static void ixp4xx_get_drvinfo(struct net_device *dev,
@@ -1433,9 +1428,11 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_change_mtu = ixp4xx_eth_change_mtu,
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
- .ndo_eth_ioctl = eth_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_hwtstamp_get = ixp4xx_hwtstamp_get,
+ .ndo_hwtstamp_set = ixp4xx_hwtstamp_set,
};
static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 66e38ce9cd1d..ffc15a432689 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1946,22 +1946,14 @@ static __net_init int geneve_init_net(struct net *net)
return 0;
}
-static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit geneve_exit_rtnl_net(struct net *net,
+ struct list_head *dev_to_kill)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
- geneve_dellink(geneve->dev, head);
-}
-
-static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
-{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list)
- geneve_destroy_tunnels(net, dev_to_kill);
+ geneve_dellink(geneve->dev, dev_to_kill);
}
static void __net_exit geneve_exit_net(struct net *net)
@@ -1973,7 +1965,7 @@ static void __net_exit geneve_exit_net(struct net *net)
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit_batch_rtnl = geneve_exit_batch_rtnl,
+ .exit_rtnl = geneve_exit_rtnl_net,
.exit = geneve_exit_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
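
This conversion, like the gtp one below, follows the same shape: with .exit_rtnl the core takes rtnl_lock() once, invokes the hook for each dying namespace, and batches the final unregister, so the driver only appends its devices to dev_to_kill. A minimal sketch with hypothetical names:

    #include <linux/netdevice.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    static unsigned int my_net_id;

    struct my_dev { struct list_head list; struct net_device *dev; };
    struct my_net { struct list_head dev_list; };

    static void my_dellink(struct net_device *dev, struct list_head *head)
    {
        unregister_netdevice_queue(dev, head); /* the usual dellink body */
    }

    static void __net_exit my_exit_rtnl_net(struct net *net,
                                            struct list_head *dev_to_kill)
    {
        struct my_net *mn = net_generic(net, my_net_id);
        struct my_dev *d, *next;

        /* queue devices for the core's batched unregister; do not call
         * unregister_netdevice() directly here
         */
        list_for_each_entry_safe(d, next, &mn->dev_list, list)
            my_dellink(d->dev, dev_to_kill);
    }

    static struct pernet_operations my_net_ops = {
        .exit_rtnl = my_exit_rtnl_net,
        .id        = &my_net_id,
        .size      = sizeof(struct my_net),
    };
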
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index ef793607890d..d4dec741c7f4 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -2475,23 +2475,19 @@ static int __net_init gtp_net_init(struct net *net)
return 0;
}
-static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
+static void __net_exit gtp_net_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
- struct net *net;
-
- list_for_each_entry(net, net_list, exit_list) {
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp, *gtp_next;
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp, *gtp_next;
- list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
- gtp_dellink(gtp->dev, dev_to_kill);
- }
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
- .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
+ .exit_rtnl = gtp_net_exit_rtnl,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 9e366f275406..5fda7a0fcce0 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1074,7 +1074,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
return 0;
case HDLCDRVCTL_DRIVERNAME:
- strscpy_pad(hi.data.drivername, "baycom_epp", sizeof(hi.data.drivername));
+ strscpy_pad(hi.data.drivername, "baycom_epp");
break;
case HDLCDRVCTL_GETMODE:
@@ -1091,8 +1091,7 @@ static int baycom_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
return baycom_setmode(bc, hi.data.modename);
case HDLCDRVCTL_MODELIST:
- strscpy_pad(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x",
- sizeof(hi.data.modename));
+ strscpy_pad(hi.data.modename, "intclk,extclk,intmodem,extmodem,divider=x");
break;
case HDLCDRVCTL_MODEMPARMASK:
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 70f7cb383228..cb6f5482d203 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
u8 cp_partial; /* partial copy into send buffer */
u8 rmsg_size; /* RNDIS header and PPI size */
- u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
u8 page_buf_cnt;
u16 q_idx;
@@ -893,6 +892,18 @@ struct nvsp_message {
sizeof(struct nvsp_message))
#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+/* Maximum # of contiguous data ranges that can make up a transmitted packet.
+ * Typically it's the max SKB fragments plus 2 for the rndis packet and the
+ * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
+ * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
+ * in a GPA direct packet sent to netvsp over VMBus.
+ */
+#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
+#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
+#else
+#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
+#endif
+
/* Estimated requestor size:
* out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
*/
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d6f5b9ea3109..720104661d7f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -953,8 +953,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ pend_size;
int i;
u32 padding = 0;
- u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
u32 remain;
/* Add padding */
@@ -1055,6 +1054,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
return 0;
}
+/* Build an "array" of mpb entries describing the data to be transferred
+ * over VMBus. After the desc header fields, each "array" entry is variable
+ * size, and each entry starts after the end of the previous entry. The
+ * "offset" and "len" fields for each entry imply the size of the entry.
+ *
+ * The pfns are in units of HV_HYP_PAGE_SIZE, because all communication with Hyper-V
+ * uses that granularity, even if the system page size of the guest is larger.
+ * Each entry in the input "pb" array must describe a contiguous range of
+ * guest physical memory so that the pfns are sequential if the range crosses
+ * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE.
+ */
+static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb,
+ u32 page_buffer_count,
+ struct vmbus_packet_mpb_array *desc,
+ u32 *desc_size)
+{
+ struct hv_mpb_array *mpb_entry = &desc->range;
+ int i, j;
+
+ for (i = 0; i < page_buffer_count; i++) {
+ u32 offset = pb[i].offset;
+ u32 len = pb[i].len;
+
+ mpb_entry->offset = offset;
+ mpb_entry->len = len;
+
+ for (j = 0; j < HVPFN_UP(offset + len); j++)
+ mpb_entry->pfn_array[j] = pb[i].pfn + j;
+
+ mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j];
+ }
+
+ desc->rangecount = page_buffer_count;
+ *desc_size = (char *)mpb_entry - (char *)desc;
+}
+
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -1097,8 +1132,11 @@ static inline int netvsc_send_pkt(
packet->dma_range = NULL;
if (packet->page_buf_cnt) {
+ struct vmbus_channel_packet_page_buffer desc;
+ u32 desc_size;
+
if (packet->cp_partial)
- pb += packet->rmsg_pgcnt;
+ pb++;
ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
if (ret) {
@@ -1106,11 +1144,12 @@ static inline int netvsc_send_pkt(
goto exit;
}
- ret = vmbus_sendpacket_pagebuffer(out_channel,
- pb, packet->page_buf_cnt,
- &nvmsg, sizeof(nvmsg),
- req_id);
-
+ netvsc_build_mpb_array(pb, packet->page_buf_cnt,
+ (struct vmbus_packet_mpb_array *)&desc,
+ &desc_size);
+ ret = vmbus_sendpacket_mpb_desc(out_channel,
+ (struct vmbus_packet_mpb_array *)&desc,
+ desc_size, &nvmsg, sizeof(nvmsg), req_id);
if (ret)
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
@@ -1259,7 +1298,7 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = section_index;
if (packet->cp_partial) {
- packet->page_buf_cnt -= packet->rmsg_pgcnt;
+ packet->page_buf_cnt--;
packet->total_data_buflen = msd_len + packet->rmsg_size;
} else {
packet->page_buf_cnt = 0;
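
The descriptor built by netvsc_build_mpb_array() is variable-length: each range contributes an 8-byte {offset, len} header plus one 8-byte PFN per hypervisor page it touches. A user-space sketch of that size computation, counting only the range entries (the fixed descriptor header fields are left out):

    #include <stddef.h>
    #include <stdint.h>

    #define HYP_PAGE_SIZE 4096u
    #define HYP_PFN_UP(x) (((x) + HYP_PAGE_SIZE - 1) / HYP_PAGE_SIZE)

    struct range { uint32_t offset, len; }; /* offset < HYP_PAGE_SIZE */

    static size_t mpb_ranges_bytes(const struct range *r, int count)
    {
        size_t bytes = 0;

        for (int i = 0; i < count; i++)
            bytes += 2 * sizeof(uint32_t) + /* offset + len header */
                     HYP_PFN_UP(r[i].offset + r[i].len) * sizeof(uint64_t);
        return bytes;
    }
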
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c51b318b8a72..14a0d04e21ae 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -326,43 +326,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
return txq;
}
-static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
- struct hv_page_buffer *pb)
-{
- int j = 0;
-
- hvpfn += offset >> HV_HYP_PAGE_SHIFT;
- offset = offset & ~HV_HYP_PAGE_MASK;
-
- while (len > 0) {
- unsigned long bytes;
-
- bytes = HV_HYP_PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
- pb[j].pfn = hvpfn;
- pb[j].offset = offset;
- pb[j].len = bytes;
-
- offset += bytes;
- len -= bytes;
-
- if (offset == HV_HYP_PAGE_SIZE && len) {
- hvpfn++;
- offset = 0;
- j++;
- }
- }
-
- return j + 1;
-}
-
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 slots_used = 0;
- char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
int i;
@@ -371,28 +338,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
* 2. skb linear data
* 3. skb fragment data
*/
- slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
- offset_in_hvpage(hdr),
- len,
- &pb[slots_used]);
+ pb[0].offset = offset_in_hvpage(hdr);
+ pb[0].len = len;
+ pb[0].pfn = virt_to_hvpfn(hdr);
packet->rmsg_size = len;
- packet->rmsg_pgcnt = slots_used;
- slots_used += fill_pg_buf(virt_to_hvpfn(data),
- offset_in_hvpage(data),
- skb_headlen(skb),
- &pb[slots_used]);
+ pb[1].offset = offset_in_hvpage(skb->data);
+ pb[1].len = skb_headlen(skb);
+ pb[1].pfn = virt_to_hvpfn(skb->data);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ struct hv_page_buffer *cur_pb = &pb[i + 2];
+ u64 pfn = page_to_hvpfn(skb_frag_page(frag));
+ u32 offset = skb_frag_off(frag);
- slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
- skb_frag_off(frag),
- skb_frag_size(frag),
- &pb[slots_used]);
+ cur_pb->offset = offset_in_hvpage(offset);
+ cur_pb->len = skb_frag_size(frag);
+ cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
}
- return slots_used;
+ return frags + 2;
}
static int count_skb_frag_slots(struct sk_buff *skb)
@@ -483,7 +449,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
struct net_device *vf_netdev;
u32 rndis_msg_size;
u32 hash;
- struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
+ struct hv_page_buffer pb[MAX_DATA_RANGES];
/* If VF is present and up then redirect packets to it.
* Skip the VF if it is marked down or has no carrier.
@@ -1405,7 +1371,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
- struct sockaddr *addr = p;
+ struct sockaddr_storage *addr = p;
int err;
err = eth_prepare_mac_addr_change(ndev, p);
@@ -1421,12 +1387,12 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
return err;
}
- err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
+ err = rndis_filter_set_device_mac(nvdev, addr->__data);
if (!err) {
eth_commit_mac_addr_change(ndev, p);
} else if (vf_netdev) {
/* rollback change on VF */
- memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
+ memcpy(addr->__data, ndev->dev_addr, ETH_ALEN);
dev_set_mac_address(vf_netdev, addr, NULL);
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 82747dfacd70..9e73959e61ee 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
struct hv_netvsc_packet *packet;
- struct hv_page_buffer page_buf[2];
- struct hv_page_buffer *pb = page_buf;
+ struct hv_page_buffer pb;
int ret;
/* Setup the packet to send it */
@@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->total_data_buflen = req->request_msg.msg_len;
packet->page_buf_cnt = 1;
- pb[0].pfn = virt_to_phys(&req->request_msg) >>
- HV_HYP_PAGE_SHIFT;
- pb[0].len = req->request_msg.msg_len;
- pb[0].offset = offset_in_hvpage(&req->request_msg);
-
- /* Add one page_buf when request_msg crossing page boundary */
- if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
- packet->page_buf_cnt++;
- pb[0].len = HV_HYP_PAGE_SIZE -
- pb[0].offset;
- pb[1].pfn = virt_to_phys((void *)&req->request_msg
- + pb[0].len) >> HV_HYP_PAGE_SHIFT;
- pb[1].offset = 0;
- pb[1].len = req->request_msg.msg_len -
- pb[0].len;
- }
+ pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
+ pb.len = req->request_msg.msg_len;
+ pb.offset = offset_in_hvpage(&req->request_msg);
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
+ ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
rcu_read_unlock_bh();
return ret;
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index e902d731776d..65dba4729155 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -493,7 +493,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index f632aab56f4c..315e617a8eeb 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -374,7 +374,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index c1428483ca34..f5d66779c2fb 100644
--- a/drivers/net/ipa/data/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -367,7 +367,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 2c7e8cb429b9..f5ed5d745aeb 100644
--- a/drivers/net/ipa/data/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -340,7 +340,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00002000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 57dc78c526b0..730d8c43a45c 100644
--- a/drivers/net/ipa/data/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -418,7 +418,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index 41f212209993..5e1d9049c62b 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -360,7 +360,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146a8000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 4eb9c909d5b3..da472a2a2e29 100644
--- a/drivers/net/ipa/data/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -416,7 +416,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x146bd000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v5.0.c b/drivers/net/ipa/data/ipa_data-v5.0.c
index 050580c99b65..bc5722e4b053 100644
--- a/drivers/net/ipa/data/ipa_data-v5.0.c
+++ b/drivers/net/ipa/data/ipa_data-v5.0.c
@@ -442,7 +442,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00003000,
- .smem_id = 497,
.smem_size = 0x00009000,
};
diff --git a/drivers/net/ipa/data/ipa_data-v5.5.c b/drivers/net/ipa/data/ipa_data-v5.5.c
index 0e6663e22533..741ae21d9d78 100644
--- a/drivers/net/ipa/data/ipa_data-v5.5.c
+++ b/drivers/net/ipa/data/ipa_data-v5.5.c
@@ -448,7 +448,6 @@ static const struct ipa_mem_data ipa_mem_data = {
.local = ipa_mem_local_data,
.imem_addr = 0x14688000,
.imem_size = 0x00002000,
- .smem_id = 497,
.smem_size = 0x0000b000,
};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index d88cbbbf18b7..2fd03f0799b2 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -180,7 +180,6 @@ struct ipa_resource_data {
* @local: array of IPA-local memory region descriptors
* @imem_addr: physical address of IPA region within IMEM
* @imem_size: size in bytes of IPA IMEM region
- * @smem_id: item identifier for IPA region within SMEM memory
* @smem_size: size in bytes of the IPA SMEM region
*/
struct ipa_mem_data {
@@ -188,7 +187,6 @@ struct ipa_mem_data {
const struct ipa_mem *local;
u32 imem_addr;
u32 imem_size;
- u32 smem_id;
u32 smem_size;
};
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index dee985eb08cb..835a3c9c1fd4 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -26,6 +26,8 @@
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
+#define SMEM_IPA_FILTER_TABLE 497
+
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
u32 i;
@@ -509,7 +511,6 @@ static void ipa_imem_exit(struct ipa *ipa)
/**
* ipa_smem_init() - Initialize SMEM memory used by the IPA
* @ipa: IPA pointer
- * @item: Item ID of SMEM memory
* @size: Size (bytes) of SMEM memory region
*
* SMEM is a managed block of shared DRAM, from which numbered "items"
@@ -523,7 +524,7 @@ static void ipa_imem_exit(struct ipa *ipa)
*
* Note: @size and the item address are not guaranteed to be page-aligned.
*/
-static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
+static int ipa_smem_init(struct ipa *ipa, size_t size)
{
struct device *dev = ipa->dev;
struct iommu_domain *domain;
@@ -545,25 +546,25 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/
- ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, item, size);
+ ret = qcom_smem_alloc(QCOM_SMEM_HOST_MODEM, SMEM_IPA_FILTER_TABLE, size);
if (ret && ret != -EEXIST) {
- dev_err(dev, "error %d allocating size %zu SMEM item %u\n",
- ret, size, item);
+ dev_err(dev, "error %d allocating size %zu SMEM item\n",
+ ret, size);
return ret;
}
/* Now get the address of the SMEM memory region */
- virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, item, &actual);
+ virt = qcom_smem_get(QCOM_SMEM_HOST_MODEM, SMEM_IPA_FILTER_TABLE, &actual);
if (IS_ERR(virt)) {
ret = PTR_ERR(virt);
- dev_err(dev, "error %d getting SMEM item %u\n", ret, item);
+ dev_err(dev, "error %d getting SMEM item\n", ret);
return ret;
}
/* In case the region was already allocated, verify the size */
if (ret && actual != size) {
- dev_err(dev, "SMEM item %u has size %zu, expected %zu\n",
- item, actual, size);
+ dev_err(dev, "SMEM item has size %zu, expected %zu\n",
+ actual, size);
return -EINVAL;
}
@@ -659,7 +660,7 @@ int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
if (ret)
goto err_unmap;
- ret = ipa_smem_init(ipa, mem_data->smem_id, mem_data->smem_size);
+ ret = ipa_smem_init(ipa, mem_data->smem_size);
if (ret)
goto err_imem_exit;
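
The allocate-or-reuse idiom above treats -EEXIST from qcom_smem_alloc() as success, since an earlier boot stage may already have carved out the item, and then validates the pre-existing size through qcom_smem_get(). A condensed sketch of the same flow:

    #include <linux/err.h>
    #include <linux/soc/qcom/smem.h>

    static int smem_get_or_alloc(unsigned int host, unsigned int item,
                                 size_t size, void **out)
    {
        size_t actual;
        void *virt;
        int ret;

        ret = qcom_smem_alloc(host, item, size);
        if (ret && ret != -EEXIST)
            return ret; /* genuine allocation failure */

        virt = qcom_smem_get(host, item, &actual);
        if (IS_ERR(virt))
            return PTR_ERR(virt);

        if (ret && actual != size) /* item pre-existed with the wrong size */
            return -EINVAL;

        *out = virt;
        return 0;
    }
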
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index ca62188a317a..e3e65772c599 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -219,7 +219,7 @@ void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
- u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
+ u32 hash = jhash_1word(get_unaligned((u32 *)(addr + 2)),
ipvlan_jhash_secret);
return hash & IPVLAN_MAC_FILTER_MASK;
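
Here, as in the macvlan hunk below, the switch from __get_unaligned_cpu32() to get_unaligned() keeps the value in CPU byte order while going through the generic accessor, which remains safe on strict-alignment architectures. A sketch of the hashing idiom:

    #include <linux/unaligned.h> /* <asm/unaligned.h> on older trees */
    #include <linux/jhash.h>

    static u32 mac_hash(const u8 *addr, u32 seed, u32 mask)
    {
        /* the last four bytes of a MAC address carry most of the entropy */
        u32 val = get_unaligned((u32 *)(addr + 2));

        return jhash_1word(val, seed) & mask;
    }
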
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d0dfa6bca6cc..4df991e494bd 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -254,7 +254,7 @@ static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
static unsigned int mc_hash(const struct macvlan_dev *vlan,
const unsigned char *addr)
{
- u32 val = __get_unaligned_cpu32(addr + 2);
+ u32 val = get_unaligned((u32 *)(addr + 2));
val ^= macvlan_hash_mix(vlan);
return hash_32(val, MACVLAN_MC_FILTER_BITS);
@@ -754,13 +754,13 @@ static int macvlan_sync_address(struct net_device *dev,
static int macvlan_set_mac_address(struct net_device *dev, void *p)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- struct sockaddr *addr = p;
+ struct sockaddr_storage *addr = p;
- if (!is_valid_ether_addr(addr->sa_data))
+ if (!is_valid_ether_addr(addr->__data))
return -EADDRNOTAVAIL;
/* If the addresses are the same, this is a no-op */
- if (ether_addr_equal(dev->dev_addr, addr->sa_data))
+ if (ether_addr_equal(dev->dev_addr, addr->__data))
return 0;
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
@@ -768,10 +768,10 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
return dev_set_mac_address(vlan->lowerdev, addr, NULL);
}
- if (macvlan_addr_busy(vlan->port, addr->sa_data))
+ if (macvlan_addr_busy(vlan->port, addr->__data))
return -EADDRINUSE;
- return macvlan_sync_address(dev, addr->sa_data);
+ return macvlan_sync_address(dev, addr->__data);
}
static void macvlan_change_rx_flags(struct net_device *dev, int change)
@@ -1295,11 +1295,11 @@ static void macvlan_port_destroy(struct net_device *dev)
*/
if (macvlan_passthru(port) &&
!ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
- struct sockaddr sa;
+ struct sockaddr_storage ss;
- sa.sa_family = port->dev->type;
- memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
- dev_set_mac_address(port->dev, &sa, NULL);
+ ss.ss_family = port->dev->type;
+ memcpy(&ss.__data, port->perm_addr, port->dev->addr_len);
+ dev_set_mac_address(port->dev, &ss, NULL);
}
kfree(port);
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
index e8d4b01c3f34..775a386d0aca 100644
--- a/drivers/net/mctp/mctp-usb.c
+++ b/drivers/net/mctp/mctp-usb.c
@@ -257,6 +257,8 @@ static int mctp_usb_open(struct net_device *dev)
WRITE_ONCE(mctp_usb->stopped, false);
+ netif_start_queue(dev);
+
return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
}
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index 4a7a303be2f7..7db40aaa079d 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -3,49 +3,30 @@
# MDIO Layer Configuration
#
-menuconfig MDIO_DEVICE
- tristate "MDIO bus device drivers"
- help
- MDIO devices and driver infrastructure code.
-
-if MDIO_DEVICE
-
config MDIO_BUS
- tristate
- default m if PHYLIB=m
- default MDIO_DEVICE
+ tristate "MDIO bus consumer layer"
help
- This internal symbol is used for link time dependencies and it
- reflects whether the mdio_bus/mdio_device code is built as a
- loadable module or built-in.
+ MDIO bus consumer layer
+
+if PHYLIB
config FWNODE_MDIO
- def_tristate PHYLIB
- depends on (ACPI || OF) || COMPILE_TEST
+ def_tristate (ACPI || OF) || COMPILE_TEST
select FIXED_PHY
help
FWNODE MDIO bus (Ethernet PHY) accessors
config OF_MDIO
- def_tristate PHYLIB
- depends on OF
- depends on PHYLIB
+ def_tristate OF
select FIXED_PHY
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
config ACPI_MDIO
- def_tristate PHYLIB
- depends on ACPI
- depends on PHYLIB
+ def_tristate ACPI
help
ACPI MDIO bus (Ethernet PHY) accessors
-if MDIO_BUS
-
-config MDIO_DEVRES
- tristate
-
config MDIO_SUN4I
tristate "Allwinner sun4i MDIO interface support"
depends on ARCH_SUNXI || COMPILE_TEST
@@ -65,7 +46,6 @@ config MDIO_ASPEED
tristate "ASPEED MDIO bus controller"
depends on ARCH_ASPEED || COMPILE_TEST
depends on OF_MDIO && HAS_IOMEM
- depends on MDIO_DEVRES
help
This module provides a driver for the independent MDIO bus
controllers found in the ASPEED AST2600 SoC. This is a driver for the
@@ -135,7 +115,6 @@ config MDIO_I2C
config MDIO_MVUSB
tristate "Marvell USB to MDIO Adapter"
depends on USB
- select MDIO_DEVRES
help
A USB to MDIO converter present on development boards for
Marvell's Link Street family of Ethernet switches.
@@ -143,7 +122,6 @@ config MDIO_MVUSB
config MDIO_MSCC_MIIM
tristate "Microsemi MIIM interface support"
depends on HAS_IOMEM && REGMAP_MMIO
- select MDIO_DEVRES
help
This driver supports the MIIM (MDIO) interface found in the network
switches of the Microsemi SoCs; it is recommended to switch on
@@ -161,7 +139,6 @@ config MDIO_OCTEON
depends on (64BIT && OF_MDIO) || COMPILE_TEST
depends on HAS_IOMEM
select MDIO_CAVIUM
- select MDIO_DEVRES
help
This module provides a driver for the Octeon and ThunderX MDIO
buses. It is required by the Octeon and ThunderX ethernet device
@@ -171,7 +148,6 @@ config MDIO_IPQ4019
tristate "Qualcomm IPQ4019 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
depends on COMMON_CLK
- depends on MDIO_DEVRES
help
This driver supports the MDIO interface found in Qualcomm
IPQ40xx, IPQ60xx, IPQ807x and IPQ50xx series SoCs.
@@ -180,11 +156,17 @@ config MDIO_IPQ8064
tristate "Qualcomm IPQ8064 MDIO interface support"
depends on HAS_IOMEM && OF_MDIO
depends on MFD_SYSCON
- depends on MDIO_DEVRES
help
This driver supports the MDIO interface found in the network
interface units of the IPQ8064 SoC
+config MDIO_REALTEK_RTL9300
+ tristate "Realtek RTL9300 MDIO interface support"
+ depends on MACH_REALTEK_RTL || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the Realtek
+ RTL9300 family of Ethernet switches with integrated SoC.
+
config MDIO_REGMAP
tristate
help
@@ -201,7 +183,6 @@ config MDIO_THUNDER
depends on 64BIT
depends on PCI
select MDIO_CAVIUM
- select MDIO_DEVRES
help
This driver supports the MDIO interfaces found on Cavium
ThunderX SoCs when the MDIO bus device appears as a PCI
@@ -299,4 +280,3 @@ config MDIO_BUS_MUX_MMIOREG
endif
-endif
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index 1015f0db4531..c23778e73890 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MDIO_REALTEK_RTL9300) += mdio-realtek-rtl9300.o
obj-$(CONFIG_MDIO_REGMAP) += mdio-regmap.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 074d96328f41..b6e30bdf5325 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -334,9 +334,9 @@ static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
NULL, unimac_mdio_resume);
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,asp-v3.0-mdio", },
{ .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
- { .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,bcm6846-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
{ .compatible = "brcm,genet-mdio-v4", },
diff --git a/drivers/net/mdio/mdio-realtek-rtl9300.c b/drivers/net/mdio/mdio-realtek-rtl9300.c
new file mode 100644
index 000000000000..33694c3ff9a7
--- /dev/null
+++ b/drivers/net/mdio/mdio-realtek-rtl9300.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MDIO controller for RTL9300 switches with integrated SoC.
+ *
+ * The MDIO communication is abstracted by the switch. At the software level
+ * communication uses the switch port to address the PHY. We work out the
+ * mapping based on the MDIO bus described in device tree and phandles on the
+ * ethernet-ports property.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/find.h>
+#include <linux/mdio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define SMI_GLB_CTRL 0xca00
+#define GLB_CTRL_INTF_SEL(intf) BIT(16 + (intf))
+#define SMI_PORT0_15_POLLING_SEL 0xca08
+#define SMI_ACCESS_PHY_CTRL_0 0xcb70
+#define SMI_ACCESS_PHY_CTRL_1 0xcb74
+#define PHY_CTRL_REG_ADDR GENMASK(24, 20)
+#define PHY_CTRL_PARK_PAGE GENMASK(19, 15)
+#define PHY_CTRL_MAIN_PAGE GENMASK(14, 3)
+#define PHY_CTRL_WRITE BIT(2)
+#define PHY_CTRL_READ 0
+#define PHY_CTRL_TYPE_C45 BIT(1)
+#define PHY_CTRL_TYPE_C22 0
+#define PHY_CTRL_CMD BIT(0)
+#define PHY_CTRL_FAIL BIT(25)
+#define SMI_ACCESS_PHY_CTRL_2 0xcb78
+#define PHY_CTRL_INDATA GENMASK(31, 16)
+#define PHY_CTRL_DATA GENMASK(15, 0)
+#define SMI_ACCESS_PHY_CTRL_3 0xcb7c
+#define PHY_CTRL_MMD_DEVAD GENMASK(20, 16)
+#define PHY_CTRL_MMD_REG GENMASK(15, 0)
+#define SMI_PORT0_5_ADDR_CTRL 0xcb80
+
+#define MAX_PORTS 28
+#define MAX_SMI_BUSSES 4
+#define MAX_SMI_ADDR 0x1f
+
+struct rtl9300_mdio_priv {
+ struct regmap *regmap;
+ struct mutex lock; /* protect HW access */
+ DECLARE_BITMAP(valid_ports, MAX_PORTS);
+ u8 smi_bus[MAX_PORTS];
+ u8 smi_addr[MAX_PORTS];
+ bool smi_bus_is_c45[MAX_SMI_BUSSES];
+ struct mii_bus *bus[MAX_SMI_BUSSES];
+};
+
+struct rtl9300_mdio_chan {
+ struct rtl9300_mdio_priv *priv;
+ u8 mdio_bus;
+};
+
+static int rtl9300_mdio_phy_to_port(struct mii_bus *bus, int phy_id)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ int i;
+
+ priv = chan->priv;
+
+ for_each_set_bit(i, priv->valid_ports, MAX_PORTS)
+ if (priv->smi_bus[i] == chan->mdio_bus &&
+ priv->smi_addr[i] == phy_id)
+ return i;
+
+ return -ENOENT;
+}
+
+static int rtl9300_mdio_wait_ready(struct rtl9300_mdio_priv *priv)
+{
+ struct regmap *regmap = priv->regmap;
+ u32 val;
+
+ lockdep_assert_held(&priv->lock);
+
+ return regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 1000);
+}
+
+static int rtl9300_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, FIELD_PREP(PHY_CTRL_INDATA, port));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_REG_ADDR, regnum) |
+ FIELD_PREP(PHY_CTRL_PARK_PAGE, 0x1f) |
+ FIELD_PREP(PHY_CTRL_MAIN_PAGE, 0xfff) |
+ PHY_CTRL_READ | PHY_CTRL_TYPE_C22 | PHY_CTRL_CMD;
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1, val);
+ if (err)
+ goto out_err;
+
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_read(regmap, SMI_ACCESS_PHY_CTRL_2, &val);
+ if (err)
+ goto out_err;
+
+ mutex_unlock(&priv->lock);
+ return FIELD_GET(PHY_CTRL_DATA, val);
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_0, BIT(port));
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, FIELD_PREP(PHY_CTRL_INDATA, value));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_REG_ADDR, regnum) |
+ FIELD_PREP(PHY_CTRL_PARK_PAGE, 0x1f) |
+ FIELD_PREP(PHY_CTRL_MAIN_PAGE, 0xfff) |
+ PHY_CTRL_WRITE | PHY_CTRL_TYPE_C22 | PHY_CTRL_CMD;
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 100);
+ if (err)
+ goto out_err;
+
+ if (val & PHY_CTRL_FAIL) {
+ err = -ENXIO;
+ goto out_err;
+ }
+
+ mutex_unlock(&priv->lock);
+ return 0;
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr, int regnum)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_INDATA, port);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, val);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_MMD_DEVAD, dev_addr) |
+ FIELD_PREP(PHY_CTRL_MMD_REG, regnum);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_3, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1,
+ PHY_CTRL_READ | PHY_CTRL_TYPE_C45 | PHY_CTRL_CMD);
+ if (err)
+ goto out_err;
+
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_read(regmap, SMI_ACCESS_PHY_CTRL_2, &val);
+ if (err)
+ goto out_err;
+
+ mutex_unlock(&priv->lock);
+ return FIELD_GET(PHY_CTRL_DATA, val);
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct rtl9300_mdio_chan *chan = bus->priv;
+ struct rtl9300_mdio_priv *priv;
+ struct regmap *regmap;
+ int port;
+ u32 val;
+ int err;
+
+ priv = chan->priv;
+ regmap = priv->regmap;
+
+ port = rtl9300_mdio_phy_to_port(bus, phy_id);
+ if (port < 0)
+ return port;
+
+ mutex_lock(&priv->lock);
+ err = rtl9300_mdio_wait_ready(priv);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_0, BIT(port));
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_INDATA, value);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_2, val);
+ if (err)
+ goto out_err;
+
+ val = FIELD_PREP(PHY_CTRL_MMD_DEVAD, dev_addr) |
+ FIELD_PREP(PHY_CTRL_MMD_REG, regnum);
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_3, val);
+ if (err)
+ goto out_err;
+
+ err = regmap_write(regmap, SMI_ACCESS_PHY_CTRL_1,
+ PHY_CTRL_TYPE_C45 | PHY_CTRL_WRITE | PHY_CTRL_CMD);
+ if (err)
+ goto out_err;
+
+ err = regmap_read_poll_timeout(regmap, SMI_ACCESS_PHY_CTRL_1,
+ val, !(val & PHY_CTRL_CMD), 10, 100);
+ if (err)
+ goto out_err;
+
+ if (val & PHY_CTRL_FAIL) {
+ err = -ENXIO;
+ goto out_err;
+ }
+
+ mutex_unlock(&priv->lock);
+ return 0;
+
+out_err:
+ mutex_unlock(&priv->lock);
+ return err;
+}
+
+static int rtl9300_mdiobus_init(struct rtl9300_mdio_priv *priv)
+{
+ u32 glb_ctrl_mask = 0, glb_ctrl_val = 0;
+ struct regmap *regmap = priv->regmap;
+ u32 port_addr[5] = { 0 };
+ u32 poll_sel[2] = { 0 };
+ int i, err;
+
+ /* Associate the port with the SMI interface and PHY */
+ for_each_set_bit(i, priv->valid_ports, MAX_PORTS) {
+ int pos;
+
+ pos = (i % 6) * 5;
+ port_addr[i / 6] |= (priv->smi_addr[i] & 0x1f) << pos;
+
+ pos = (i % 16) * 2;
+ poll_sel[i / 16] |= (priv->smi_bus[i] & 0x3) << pos;
+ }
+
+ /* Put the interfaces into C45 mode if required */
+ glb_ctrl_mask = GENMASK(19, 16);
+ for (i = 0; i < MAX_SMI_BUSSES; i++)
+ if (priv->smi_bus_is_c45[i])
+ glb_ctrl_val |= GLB_CTRL_INTF_SEL(i);
+
+ err = regmap_bulk_write(regmap, SMI_PORT0_5_ADDR_CTRL,
+ port_addr, 5);
+ if (err)
+ return err;
+
+ err = regmap_bulk_write(regmap, SMI_PORT0_15_POLLING_SEL,
+ poll_sel, 2);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(regmap, SMI_GLB_CTRL,
+ glb_ctrl_mask, glb_ctrl_val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int rtl9300_mdiobus_probe_one(struct device *dev, struct rtl9300_mdio_priv *priv,
+ struct fwnode_handle *node)
+{
+ struct rtl9300_mdio_chan *chan;
+ struct fwnode_handle *child;
+ struct mii_bus *bus;
+ u32 mdio_bus;
+ int err;
+
+ err = fwnode_property_read_u32(node, "reg", &mdio_bus);
+ if (err)
+ return err;
+
+ /* The MDIO accesses from the kernel work with the PHY polling unit in
+ * the switch. We need to tell the PPU to operate either in GPHY (i.e.
+ * clause 22) or 10GPHY mode (i.e. clause 45).
+ *
+ * We select 10GPHY mode if there is at least one PHY that declares
+ * compatible = "ethernet-phy-ieee802.3-c45". This does mean we can't
+ * support both c45 and c22 on the same MDIO bus.
+ */
+ fwnode_for_each_child_node(node, child)
+ if (fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45"))
+ priv->smi_bus_is_c45[mdio_bus] = true;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*chan));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Realtek Switch MDIO Bus";
+ if (priv->smi_bus_is_c45[mdio_bus]) {
+ bus->read_c45 = rtl9300_mdio_read_c45;
+ bus->write_c45 = rtl9300_mdio_write_c45;
+ } else {
+ bus->read = rtl9300_mdio_read_c22;
+ bus->write = rtl9300_mdio_write_c22;
+ }
+ bus->parent = dev;
+ chan = bus->priv;
+ chan->mdio_bus = mdio_bus;
+ chan->priv = priv;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", dev_name(dev), mdio_bus);
+
+ err = devm_of_mdiobus_register(dev, bus, to_of_node(node));
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
+
+ return 0;
+}
+
+/* The mdio-controller is part of a switch block so we parse the sibling
+ * ethernet-ports node and build a mapping of the switch port to MDIO bus/addr
+ * based on the phy-handle.
+ */
+static int rtl9300_mdiobus_map_ports(struct device *dev)
+{
+ struct rtl9300_mdio_priv *priv = dev_get_drvdata(dev);
+ struct device *parent = dev->parent;
+ struct fwnode_handle *port;
+ int err;
+
+ struct fwnode_handle *ports __free(fwnode_handle) =
+ device_get_named_child_node(parent, "ethernet-ports");
+ if (!ports)
+ return dev_err_probe(dev, -EINVAL, "%pfwP missing ethernet-ports\n",
+ dev_fwnode(parent));
+
+ fwnode_for_each_child_node(ports, port) {
+ struct device_node *mdio_dn;
+ u32 addr;
+ u32 bus;
+ u32 pn;
+
+ struct device_node *phy_dn __free(device_node) =
+ of_parse_phandle(to_of_node(port), "phy-handle", 0);
+ /* skip ports without phys */
+ if (!phy_dn)
+ continue;
+
+ mdio_dn = phy_dn->parent;
+ /* only map ports that are connected to this mdio-controller */
+ if (mdio_dn->parent != dev->of_node)
+ continue;
+
+ err = fwnode_property_read_u32(port, "reg", &pn);
+ if (err)
+ return err;
+
+ if (pn >= MAX_PORTS)
+ return dev_err_probe(dev, -EINVAL, "illegal port number %d\n", pn);
+
+ if (test_bit(pn, priv->valid_ports))
+ return dev_err_probe(dev, -EINVAL, "duplicated port number %d\n", pn);
+
+ err = of_property_read_u32(mdio_dn, "reg", &bus);
+ if (err)
+ return err;
+
+ if (bus >= MAX_SMI_BUSSES)
+ return dev_err_probe(dev, -EINVAL, "illegal smi bus number %d\n", bus);
+
+ err = of_property_read_u32(phy_dn, "reg", &addr);
+ if (err)
+ return err;
+
+ __set_bit(pn, priv->valid_ports);
+ priv->smi_bus[pn] = bus;
+ priv->smi_addr[pn] = addr;
+ }
+
+ return 0;
+}
+
+static int rtl9300_mdiobus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtl9300_mdio_priv *priv;
+ struct fwnode_handle *child;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = devm_mutex_init(dev, &priv->lock);
+ if (err)
+ return err;
+
+ priv->regmap = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ platform_set_drvdata(pdev, priv);
+
+ err = rtl9300_mdiobus_map_ports(dev);
+ if (err)
+ return err;
+
+ device_for_each_child_node(dev, child) {
+ err = rtl9300_mdiobus_probe_one(dev, priv, child);
+ if (err)
+ return err;
+ }
+
+ err = rtl9300_mdiobus_init(priv);
+ if (err)
+ return dev_err_probe(dev, err, "failed to initialise MDIO bus controller\n");
+
+ return 0;
+}
+
+static const struct of_device_id rtl9300_mdio_ids[] = {
+ { .compatible = "realtek,rtl9301-mdio" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtl9300_mdio_ids);
+
+static struct platform_driver rtl9300_mdio_driver = {
+ .probe = rtl9300_mdiobus_probe,
+ .driver = {
+ .name = "mdio-rtl9300",
+ .of_match_table = rtl9300_mdio_ids,
+ },
+};
+
+module_platform_driver(rtl9300_mdio_driver);
+
+MODULE_DESCRIPTION("RTL9300 MDIO driver");
+MODULE_LICENSE("GPL");
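For reference, rtl9300_mdiobus_init() above packs six 5-bit PHY addresses into each 32-bit SMI_PORT0_5_ADDR_CTRL word and sixteen 2-bit bus selectors into each SMI_PORT0_15_POLLING_SEL word. An arithmetic sketch of that packing (helper name hypothetical):

	/* Port 7 with PHY address 9 on SMI bus 1, for example, lands in
	 * port_addr[1] bits 9:5 and poll_sel[0] bits 15:14.
	 */
	static void pack_port(u32 port_addr[5], u32 poll_sel[2],
			      unsigned int port, u8 phy_addr, u8 smi_bus)
	{
		port_addr[port / 6] |= (phy_addr & 0x1f) << ((port % 6) * 5);
		poll_sel[port / 16] |= (smi_bus & 0x3) << ((port % 16) * 2);
	}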
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 1e1aa72b1eff..a3047f7258a7 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -40,16 +40,16 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
return err;
}
- err = pci_request_regions(pdev, KBUILD_MODNAME);
+ err = pcim_request_all_regions(pdev, KBUILD_MODNAME);
if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pcim_request_all_regions failed\n");
goto err_disable_device;
}
nexus->bar0 = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
if (!nexus->bar0) {
err = -ENOMEM;
- goto err_release_regions;
+ goto err_disable_device;
}
i = 0;
@@ -107,9 +107,6 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
}
return 0;
-err_release_regions:
- pci_release_regions(pdev);
-
err_disable_device:
pci_set_drvdata(pdev, NULL);
return err;
@@ -129,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
mdiobus_unregister(bus->mii_bus);
oct_mdio_writeq(0, bus->register_base + SMI_EN);
}
- pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
}
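The thunder conversion relies on pcim_request_all_regions() being device-managed: once the regions are requested through devres, both the probe error paths and the remove path can simply return and let devres release them, which is why the pci_release_regions() calls disappear. A minimal sketch of the resulting probe shape (driver name and body hypothetical):

	#include <linux/pci.h>

	static int sketch_probe(struct pci_dev *pdev)
	{
		void __iomem *bar0;
		int err;

		err = pcim_enable_device(pdev);
		if (err)
			return err;

		err = pcim_request_all_regions(pdev, "sketch");
		if (err)
			return err;	/* nothing to unwind: devres cleans up */

		bar0 = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
		if (!bar0)
			return -ENOMEM;

		return 0;
	}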
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 2f4fc664d2e1..98f667b121f7 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -458,7 +458,7 @@ int of_phy_register_fixed_link(struct device_node *np)
return -ENODEV;
register_phy:
- return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np));
+ return PTR_ERR_OR_ZERO(fixed_phy_register(&status, np));
}
EXPORT_SYMBOL(of_phy_register_fixed_link);
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index d88bdb9a1717..47cdee5577d4 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -85,11 +85,11 @@ static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec)
return -ENOSPC;
}
-static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
+static int nsim_ipsec_parse_proto_keys(struct net_device *dev,
+ struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
- struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -129,17 +129,16 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
return 0;
}
-static int nsim_ipsec_add_sa(struct xfrm_state *xs,
+static int nsim_ipsec_add_sa(struct net_device *dev,
+ struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
struct nsim_ipsec *ipsec;
- struct net_device *dev;
struct netdevsim *ns;
struct nsim_sa sa;
u16 sa_idx;
int ret;
- dev = xs->xso.real_dev;
ns = netdev_priv(dev);
ipsec = &ns->ipsec;
@@ -174,7 +173,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs,
sa.crypt = xs->ealg || xs->aead;
/* get the key and salt */
- ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
+ ret = nsim_ipsec_parse_proto_keys(dev, xs, sa.key, &sa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for SA table");
return ret;
@@ -200,9 +199,9 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs,
return 0;
}
-static void nsim_ipsec_del_sa(struct xfrm_state *xs)
+static void nsim_ipsec_del_sa(struct net_device *dev, struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
+ struct netdevsim *ns = netdev_priv(dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
u16 sa_idx;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0e0321a7ddd7..af545d42961c 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -29,6 +29,7 @@
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
#include <net/udp_tunnel.h>
+#include <net/busy_poll.h>
#include "netdevsim.h"
@@ -357,6 +358,7 @@ static int nsim_rcv(struct nsim_rq *rq, int budget)
break;
skb = skb_dequeue(&rq->skb_queue);
+ skb_mark_napi_id(skb, &rq->napi);
netif_receive_skb(skb);
}
@@ -879,11 +881,13 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_SG |
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
+ NETIF_F_LRO |
NETIF_F_TSO;
dev->hw_features |= NETIF_F_HW_TC |
NETIF_F_SG |
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
+ NETIF_F_LRO |
NETIF_F_TSO;
dev->max_mtu = ETH_MAX_MTU;
dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
diff --git a/drivers/net/ovpn/Makefile b/drivers/net/ovpn/Makefile
new file mode 100644
index 000000000000..229be66167e1
--- /dev/null
+++ b/drivers/net/ovpn/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# ovpn -- OpenVPN data channel offload in kernel space
+#
+# Copyright (C) 2020-2025 OpenVPN, Inc.
+#
+# Author: Antonio Quartulli <antonio@openvpn.net>
+
+obj-$(CONFIG_OVPN) := ovpn.o
+ovpn-y += bind.o
+ovpn-y += crypto.o
+ovpn-y += crypto_aead.o
+ovpn-y += main.o
+ovpn-y += io.o
+ovpn-y += netlink.o
+ovpn-y += netlink-gen.o
+ovpn-y += peer.o
+ovpn-y += pktid.o
+ovpn-y += socket.o
+ovpn-y += stats.o
+ovpn-y += tcp.o
+ovpn-y += udp.o
diff --git a/drivers/net/ovpn/bind.c b/drivers/net/ovpn/bind.c
new file mode 100644
index 000000000000..24d2788a277e
--- /dev/null
+++ b/drivers/net/ovpn/bind.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2012-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+
+#include "ovpnpriv.h"
+#include "bind.h"
+#include "peer.h"
+
+/**
+ * ovpn_bind_from_sockaddr - allocate a binding for the given sockaddr
+ * @ss: the sockaddr to copy into the binding
+ *
+ * Return: the newly allocated bind on success or an ERR_PTR on failure
+ */
+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss)
+{
+ struct ovpn_bind *bind;
+ size_t sa_len;
+
+ if (ss->ss_family == AF_INET)
+ sa_len = sizeof(struct sockaddr_in);
+ else if (ss->ss_family == AF_INET6)
+ sa_len = sizeof(struct sockaddr_in6);
+ else
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ bind = kzalloc(sizeof(*bind), GFP_ATOMIC);
+ if (unlikely(!bind))
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&bind->remote, ss, sa_len);
+
+ return bind;
+}
+
+/**
+ * ovpn_bind_reset - assign new binding to peer
+ * @peer: the peer whose binding has to be replaced
+ * @new: the new bind to assign
+ */
+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *new)
+{
+ lockdep_assert_held(&peer->lock);
+
+ kfree_rcu(rcu_replace_pointer(peer->bind, new,
+ lockdep_is_held(&peer->lock)), rcu);
+}
diff --git a/drivers/net/ovpn/bind.h b/drivers/net/ovpn/bind.h
new file mode 100644
index 000000000000..4e0b8398bfd9
--- /dev/null
+++ b/drivers/net/ovpn/bind.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2012-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNBIND_H_
+#define _NET_OVPN_OVPNBIND_H_
+
+#include <net/ip.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+struct ovpn_peer;
+
+/**
+ * union ovpn_sockaddr - basic transport layer address
+ * @in4: IPv4 address
+ * @in6: IPv6 address
+ */
+union ovpn_sockaddr {
+ struct sockaddr_in in4;
+ struct sockaddr_in6 in6;
+};
+
+/**
+ * struct ovpn_bind - remote peer binding
+ * @remote: the remote peer socket address
+ * @local: local endpoint used to talk to the peer
+ * @local.ipv4: local IPv4 used to talk to the peer
+ * @local.ipv6: local IPv6 used to talk to the peer
+ * @rcu: used to schedule RCU cleanup job
+ */
+struct ovpn_bind {
+ union ovpn_sockaddr remote; /* remote sockaddr */
+
+ union {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } local;
+
+ struct rcu_head rcu;
+};
+
+/**
+ * ovpn_bind_skb_src_match - match packet source with binding
+ * @bind: the binding to match
+ * @skb: the packet to match
+ *
+ * Return: true if the packet source matches the remote peer sockaddr
+ * in the binding
+ */
+static inline bool ovpn_bind_skb_src_match(const struct ovpn_bind *bind,
+ const struct sk_buff *skb)
+{
+ const union ovpn_sockaddr *remote;
+
+ if (unlikely(!bind))
+ return false;
+
+ remote = &bind->remote;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (unlikely(remote->in4.sin_family != AF_INET))
+ return false;
+
+ if (unlikely(remote->in4.sin_addr.s_addr != ip_hdr(skb)->saddr))
+ return false;
+
+ if (unlikely(remote->in4.sin_port != udp_hdr(skb)->source))
+ return false;
+ break;
+ case htons(ETH_P_IPV6):
+ if (unlikely(remote->in6.sin6_family != AF_INET6))
+ return false;
+
+ if (unlikely(!ipv6_addr_equal(&remote->in6.sin6_addr,
+ &ipv6_hdr(skb)->saddr)))
+ return false;
+
+ if (unlikely(remote->in6.sin6_port != udp_hdr(skb)->source))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss);
+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *bind);
+
+#endif /* _NET_OVPN_OVPNBIND_H_ */
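Taken together, ovpn_bind_from_sockaddr() and ovpn_bind_reset() form a small allocate-then-publish API: the new bind is built outside any lock and swapped in under the peer lock, with the old bind freed after a grace period via kfree_rcu(). A hypothetical usage sketch, assuming peer->lock is the spinlock the lockdep assertion in ovpn_bind_reset() refers to:

	/* Replace a peer's binding with one built from a freshly learned
	 * remote address. Not from the patch; for illustration only.
	 */
	static int sketch_rebind(struct ovpn_peer *peer,
				 const struct sockaddr_storage *ss)
	{
		struct ovpn_bind *bind = ovpn_bind_from_sockaddr(ss);

		if (IS_ERR(bind))
			return PTR_ERR(bind);

		spin_lock_bh(&peer->lock);
		ovpn_bind_reset(peer, bind);
		spin_unlock_bh(&peer->lock);

		return 0;
	}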
diff --git a/drivers/net/ovpn/crypto.c b/drivers/net/ovpn/crypto.c
new file mode 100644
index 000000000000..90580e32052f
--- /dev/null
+++ b/drivers/net/ovpn/crypto.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/ovpn.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "pktid.h"
+#include "crypto_aead.h"
+#include "crypto.h"
+
+static void ovpn_ks_destroy_rcu(struct rcu_head *head)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = container_of(head, struct ovpn_crypto_key_slot, rcu);
+ ovpn_aead_crypto_key_slot_destroy(ks);
+}
+
+void ovpn_crypto_key_slot_release(struct kref *kref)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = container_of(kref, struct ovpn_crypto_key_slot, refcount);
+ call_rcu(&ks->rcu, ovpn_ks_destroy_rcu);
+}
+
+/* can only be invoked when all peer references have been dropped (i.e. RCU
+ * release routine)
+ */
+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ ks = rcu_access_pointer(cs->slots[0]);
+ if (ks) {
+ RCU_INIT_POINTER(cs->slots[0], NULL);
+ ovpn_crypto_key_slot_put(ks);
+ }
+
+ ks = rcu_access_pointer(cs->slots[1]);
+ if (ks) {
+ RCU_INIT_POINTER(cs->slots[1], NULL);
+ ovpn_crypto_key_slot_put(ks);
+ }
+}
+
+/* removes the key matching the specified id from the crypto context */
+bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+
+ spin_lock_bh(&cs->lock);
+ if (rcu_access_pointer(cs->slots[0])->key_id == key_id) {
+ ks = rcu_replace_pointer(cs->slots[0], NULL,
+ lockdep_is_held(&cs->lock));
+ } else if (rcu_access_pointer(cs->slots[1])->key_id == key_id) {
+ ks = rcu_replace_pointer(cs->slots[1], NULL,
+ lockdep_is_held(&cs->lock));
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (ks)
+ ovpn_crypto_key_slot_put(ks);
+
+ /* let the caller know if a key was actually killed */
+ return ks;
+}
+
+/* Reset the ovpn_crypto_state object in a way that is atomic
+ * to RCU readers.
+ */
+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs,
+ const struct ovpn_peer_key_reset *pkr)
+{
+ struct ovpn_crypto_key_slot *old = NULL, *new;
+ u8 idx;
+
+ if (pkr->slot != OVPN_KEY_SLOT_PRIMARY &&
+ pkr->slot != OVPN_KEY_SLOT_SECONDARY)
+ return -EINVAL;
+
+ new = ovpn_aead_crypto_key_slot_new(&pkr->key);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ switch (pkr->slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ old = rcu_replace_pointer(cs->slots[idx], new,
+ lockdep_is_held(&cs->lock));
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ old = rcu_replace_pointer(cs->slots[!idx], new,
+ lockdep_is_held(&cs->lock));
+ break;
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (old)
+ ovpn_crypto_key_slot_put(old);
+
+ return 0;
+}
+
+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+ u8 idx;
+
+ if (slot != OVPN_KEY_SLOT_PRIMARY &&
+ slot != OVPN_KEY_SLOT_SECONDARY) {
+ pr_warn("Invalid slot to release: %u\n", slot);
+ return;
+ }
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ switch (slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ ks = rcu_replace_pointer(cs->slots[idx], NULL,
+ lockdep_is_held(&cs->lock));
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ ks = rcu_replace_pointer(cs->slots[!idx], NULL,
+ lockdep_is_held(&cs->lock));
+ break;
+ }
+ spin_unlock_bh(&cs->lock);
+
+ if (!ks) {
+ pr_debug("Key slot already released: %u\n", slot);
+ return;
+ }
+
+ pr_debug("deleting key slot %u, key_id=%u\n", slot, ks->key_id);
+ ovpn_crypto_key_slot_put(ks);
+}
+
+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs)
+{
+ const struct ovpn_crypto_key_slot *old_primary, *old_secondary;
+ u8 idx;
+
+ spin_lock_bh(&cs->lock);
+ idx = cs->primary_idx;
+ old_primary = rcu_dereference_protected(cs->slots[idx],
+ lockdep_is_held(&cs->lock));
+ old_secondary = rcu_dereference_protected(cs->slots[!idx],
+ lockdep_is_held(&cs->lock));
+ /* perform real swap by switching the index of the primary key */
+ WRITE_ONCE(cs->primary_idx, !cs->primary_idx);
+
+ pr_debug("key swapped: (old primary) %d <-> (new primary) %d\n",
+ old_primary ? old_primary->key_id : -1,
+ old_secondary ? old_secondary->key_id : -1);
+
+ spin_unlock_bh(&cs->lock);
+}
+
+/**
+ * ovpn_crypto_config_get - populate keyconf object with non-sensitive key data
+ * @cs: the crypto state to extract the key data from
+ * @slot: the specific slot to inspect
+ * @keyconf: the output object to populate
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot,
+ struct ovpn_key_config *keyconf)
+{
+ struct ovpn_crypto_key_slot *ks;
+ int idx;
+
+ switch (slot) {
+ case OVPN_KEY_SLOT_PRIMARY:
+ idx = cs->primary_idx;
+ break;
+ case OVPN_KEY_SLOT_SECONDARY:
+ idx = !cs->primary_idx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ ks = rcu_dereference(cs->slots[idx]);
+ if (!ks) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ keyconf->cipher_alg = ovpn_aead_crypto_alg(ks);
+ keyconf->key_id = ks->key_id;
+ rcu_read_unlock();
+
+ return 0;
+}
diff --git a/drivers/net/ovpn/crypto.h b/drivers/net/ovpn/crypto.h
new file mode 100644
index 000000000000..0e284fec3a75
--- /dev/null
+++ b/drivers/net/ovpn/crypto.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNCRYPTO_H_
+#define _NET_OVPN_OVPNCRYPTO_H_
+
+#include "pktid.h"
+#include "proto.h"
+
+/* info needed for both encrypt and decrypt directions */
+struct ovpn_key_direction {
+ const u8 *cipher_key;
+ size_t cipher_key_size;
+ const u8 *nonce_tail; /* only needed for GCM modes */
+ size_t nonce_tail_size; /* only needed for GCM modes */
+};
+
+/* all info for a particular symmetric key (primary or secondary) */
+struct ovpn_key_config {
+ enum ovpn_cipher_alg cipher_alg;
+ u8 key_id;
+ struct ovpn_key_direction encrypt;
+ struct ovpn_key_direction decrypt;
+};
+
+/* used to pass settings from netlink to the crypto engine */
+struct ovpn_peer_key_reset {
+ enum ovpn_key_slot slot;
+ struct ovpn_key_config key;
+};
+
+struct ovpn_crypto_key_slot {
+ u8 key_id;
+
+ struct crypto_aead *encrypt;
+ struct crypto_aead *decrypt;
+ u8 nonce_tail_xmit[OVPN_NONCE_TAIL_SIZE];
+ u8 nonce_tail_recv[OVPN_NONCE_TAIL_SIZE];
+
+ struct ovpn_pktid_recv pid_recv ____cacheline_aligned_in_smp;
+ struct ovpn_pktid_xmit pid_xmit ____cacheline_aligned_in_smp;
+ struct kref refcount;
+ struct rcu_head rcu;
+};
+
+struct ovpn_crypto_state {
+ struct ovpn_crypto_key_slot __rcu *slots[2];
+ u8 primary_idx;
+
+ /* protects primary and secondary slots */
+ spinlock_t lock;
+};
+
+static inline bool ovpn_crypto_key_slot_hold(struct ovpn_crypto_key_slot *ks)
+{
+ return kref_get_unless_zero(&ks->refcount);
+}
+
+static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs)
+{
+ RCU_INIT_POINTER(cs->slots[0], NULL);
+ RCU_INIT_POINTER(cs->slots[1], NULL);
+ cs->primary_idx = 0;
+ spin_lock_init(&cs->lock);
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id)
+{
+ struct ovpn_crypto_key_slot *ks;
+ u8 idx;
+
+ if (unlikely(!cs))
+ return NULL;
+
+ rcu_read_lock();
+ idx = READ_ONCE(cs->primary_idx);
+ ks = rcu_dereference(cs->slots[idx]);
+ if (ks && ks->key_id == key_id) {
+ if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ goto out;
+ }
+
+ ks = rcu_dereference(cs->slots[!idx]);
+ if (ks && ks->key_id == key_id) {
+ if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ goto out;
+ }
+
+ /* when both key slots are occupied but no matching key ID is found, ks
+ * has to be reset to NULL to avoid carrying a stale pointer
+ */
+ ks = NULL;
+out:
+ rcu_read_unlock();
+
+ return ks;
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_slot_primary(const struct ovpn_crypto_state *cs)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ rcu_read_lock();
+ ks = rcu_dereference(cs->slots[cs->primary_idx]);
+ if (unlikely(ks && !ovpn_crypto_key_slot_hold(ks)))
+ ks = NULL;
+ rcu_read_unlock();
+
+ return ks;
+}
+
+void ovpn_crypto_key_slot_release(struct kref *kref);
+
+static inline void ovpn_crypto_key_slot_put(struct ovpn_crypto_key_slot *ks)
+{
+ kref_put(&ks->refcount, ovpn_crypto_key_slot_release);
+}
+
+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs,
+ const struct ovpn_peer_key_reset *pkr);
+
+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot);
+
+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs);
+
+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs);
+
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+ enum ovpn_key_slot slot,
+ struct ovpn_key_config *keyconf);
+
+bool ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id);
+
+#endif /* _NET_OVPN_OVPNCRYPTO_H_ */
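The two-slot design supports make-before-break rekeying: a new key is installed in the secondary slot while traffic continues on the primary, then a swap promotes it atomically for RCU readers. A hypothetical rotation flow using the API above:

	static int sketch_rotate(struct ovpn_crypto_state *cs,
				 const struct ovpn_peer_key_reset *pkr)
	{
		int err;

		/* assumes pkr->slot == OVPN_KEY_SLOT_SECONDARY */
		err = ovpn_crypto_state_reset(cs, pkr);
		if (err)
			return err;

		/* promote the new key, then retire the old one */
		ovpn_crypto_key_slots_swap(cs);
		ovpn_crypto_key_slot_delete(cs, OVPN_KEY_SLOT_SECONDARY);

		return 0;
	}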
diff --git a/drivers/net/ovpn/crypto_aead.c b/drivers/net/ovpn/crypto_aead.c
new file mode 100644
index 000000000000..2cca759feffa
--- /dev/null
+++ b/drivers/net/ovpn/crypto_aead.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <crypto/aead.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "pktid.h"
+#include "crypto_aead.h"
+#include "crypto.h"
+#include "peer.h"
+#include "proto.h"
+#include "skb.h"
+
+#define OVPN_AUTH_TAG_SIZE 16
+#define OVPN_AAD_SIZE (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE)
+
+#define ALG_NAME_AES "gcm(aes)"
+#define ALG_NAME_CHACHAPOLY "rfc7539(chacha20,poly1305)"
+
+static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
+{
+ return OVPN_OPCODE_SIZE + /* OP header size */
+ sizeof(u32) + /* Packet ID */
+ crypto_aead_authsize(ks->encrypt); /* Auth Tag */
+}
+
+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb)
+{
+ const unsigned int tag_size = crypto_aead_authsize(ks->encrypt);
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ int nfrags, ret;
+ u32 pktid, op;
+ u8 *iv;
+
+ ovpn_skb_cb(skb)->peer = peer;
+ ovpn_skb_cb(skb)->ks = ks;
+
+ /* Sample AEAD header format:
+ * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a...
+ * [ OP32 ] [seq # ] [ auth tag ] [ payload ... ]
+ * [4-byte
+ * IV head]
+ */
+
+ /* check that there's enough headroom in the skb for packet
+ * encapsulation
+ */
+ if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM)))
+ return -ENOBUFS;
+
+ /* get number of skb frags and ensure that packet data is writable */
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (unlikely(nfrags < 0))
+ return nfrags;
+
+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
+ return -ENOSPC;
+
+ /* sg may be required by async crypto */
+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
+ (nfrags + 2), GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->sg))
+ return -ENOMEM;
+
+ sg = ovpn_skb_cb(skb)->sg;
+
+ /* sg table:
+ * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE),
+ * 1, 2, 3, ..., n: payload,
+ * n+1: auth_tag (len=tag_size)
+ */
+ sg_init_table(sg, nfrags + 2);
+
+ /* build scatterlist to encrypt packet payload */
+ ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ netdev_err(peer->ovpn->dev,
+ "encrypt: cannot map skb to sg: %d\n", ret);
+ return ret;
+ }
+
+ /* append auth_tag onto scatterlist */
+ __skb_push(skb, tag_size);
+ sg_set_buf(sg + ret + 1, skb->data, tag_size);
+
+ /* obtain packet ID, which is used both as a first
+ * 4 bytes of nonce and last 4 bytes of associated data.
+ */
+ ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
+ if (unlikely(ret < 0))
+ return ret;
+
+ /* iv may be required by async crypto */
+ ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->iv))
+ return -ENOMEM;
+
+ iv = ovpn_skb_cb(skb)->iv;
+
+ /* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes
+ * nonce
+ */
+ ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv);
+
+ /* make space for packet id and push it to the front */
+ __skb_push(skb, OVPN_NONCE_WIRE_SIZE);
+ memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE);
+
+ /* add packet op as head of additional data */
+ op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id);
+ __skb_push(skb, OVPN_OPCODE_SIZE);
+ BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE);
+ *((__force __be32 *)skb->data) = htonl(op);
+
+ /* AEAD Additional data */
+ sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
+
+ req = aead_request_alloc(ks->encrypt, GFP_ATOMIC);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ovpn_skb_cb(skb)->req = req;
+
+ /* setup async crypto operation */
+ aead_request_set_tfm(req, ks->encrypt);
+ aead_request_set_callback(req, 0, ovpn_encrypt_post, skb);
+ aead_request_set_crypt(req, sg, sg,
+ skb->len - ovpn_aead_encap_overhead(ks), iv);
+ aead_request_set_ad(req, OVPN_AAD_SIZE);
+
+ /* encrypt it */
+ return crypto_aead_encrypt(req);
+}
+
+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb)
+{
+ const unsigned int tag_size = crypto_aead_authsize(ks->decrypt);
+ int ret, payload_len, nfrags;
+ unsigned int payload_offset;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ u8 *iv;
+
+ payload_offset = OVPN_AAD_SIZE + tag_size;
+ payload_len = skb->len - payload_offset;
+
+ ovpn_skb_cb(skb)->payload_offset = payload_offset;
+ ovpn_skb_cb(skb)->peer = peer;
+ ovpn_skb_cb(skb)->ks = ks;
+
+ /* sanity check on packet size, payload size must be >= 0 */
+ if (unlikely(payload_len < 0))
+ return -EINVAL;
+
+ /* Prepare the skb data buffer to be accessed up until the auth tag.
+ * This is required because this area is directly mapped into the sg
+ * list.
+ */
+ if (unlikely(!pskb_may_pull(skb, payload_offset)))
+ return -ENODATA;
+
+ /* get number of skb frags and ensure that packet data is writable */
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (unlikely(nfrags < 0))
+ return nfrags;
+
+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
+ return -ENOSPC;
+
+ /* sg may be required by async crypto */
+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
+ (nfrags + 2), GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->sg))
+ return -ENOMEM;
+
+ sg = ovpn_skb_cb(skb)->sg;
+
+ /* sg table:
+ * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
+ * 1, 2, 3, ..., n: payload,
+ * n+1: auth_tag (len=tag_size)
+ */
+ sg_init_table(sg, nfrags + 2);
+
+ /* packet op is head of additional data */
+ sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);
+
+ /* build scatterlist to decrypt packet payload */
+ ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len);
+ if (unlikely(ret < 0)) {
+ netdev_err(peer->ovpn->dev,
+ "decrypt: cannot map skb to sg: %d\n", ret);
+ return ret;
+ }
+
+ /* append auth_tag onto scatterlist */
+ sg_set_buf(sg + ret + 1, skb->data + OVPN_AAD_SIZE, tag_size);
+
+ /* iv may be required by async crypto */
+ ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
+ if (unlikely(!ovpn_skb_cb(skb)->iv))
+ return -ENOMEM;
+
+ iv = ovpn_skb_cb(skb)->iv;
+
+ /* copy nonce into IV buffer */
+ memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
+ memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
+ OVPN_NONCE_TAIL_SIZE);
+
+ req = aead_request_alloc(ks->decrypt, GFP_ATOMIC);
+ if (unlikely(!req))
+ return -ENOMEM;
+
+ ovpn_skb_cb(skb)->req = req;
+
+ /* setup async crypto operation */
+ aead_request_set_tfm(req, ks->decrypt);
+ aead_request_set_callback(req, 0, ovpn_decrypt_post, skb);
+ aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv);
+
+ aead_request_set_ad(req, OVPN_AAD_SIZE);
+
+ /* decrypt it */
+ return crypto_aead_decrypt(req);
+}
+
+/* Initialize a struct crypto_aead object */
+static struct crypto_aead *ovpn_aead_init(const char *title,
+ const char *alg_name,
+ const unsigned char *key,
+ unsigned int keylen)
+{
+ struct crypto_aead *aead;
+ int ret;
+
+ aead = crypto_alloc_aead(alg_name, 0, 0);
+ if (IS_ERR(aead)) {
+ ret = PTR_ERR(aead);
+ pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret);
+ aead = NULL;
+ goto error;
+ }
+
+ ret = crypto_aead_setkey(aead, key, keylen);
+ if (ret) {
+ pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title,
+ keylen, ret);
+ goto error;
+ }
+
+ ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE);
+ if (ret) {
+ pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title,
+ ret);
+ goto error;
+ }
+
+ /* basic AEAD assumption */
+ if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
+ pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ pr_debug("********* Cipher %s (%s)\n", alg_name, title);
+ pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead));
+ pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead));
+ pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead));
+ pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead));
+ pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead));
+
+ return aead;
+
+error:
+ crypto_free_aead(aead);
+ return ERR_PTR(ret);
+}
+
+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks)
+{
+ if (!ks)
+ return;
+
+ crypto_free_aead(ks->encrypt);
+ crypto_free_aead(ks->decrypt);
+ kfree(ks);
+}
+
+struct ovpn_crypto_key_slot *
+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc)
+{
+ struct ovpn_crypto_key_slot *ks = NULL;
+ const char *alg_name;
+ int ret;
+
+ /* validate crypto alg */
+ switch (kc->cipher_alg) {
+ case OVPN_CIPHER_ALG_AES_GCM:
+ alg_name = ALG_NAME_AES;
+ break;
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ alg_name = ALG_NAME_CHACHAPOLY;
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE ||
+ kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ /* build the key slot */
+ ks = kmalloc(sizeof(*ks), GFP_KERNEL);
+ if (!ks)
+ return ERR_PTR(-ENOMEM);
+
+ ks->encrypt = NULL;
+ ks->decrypt = NULL;
+ kref_init(&ks->refcount);
+ ks->key_id = kc->key_id;
+
+ ks->encrypt = ovpn_aead_init("encrypt", alg_name,
+ kc->encrypt.cipher_key,
+ kc->encrypt.cipher_key_size);
+ if (IS_ERR(ks->encrypt)) {
+ ret = PTR_ERR(ks->encrypt);
+ ks->encrypt = NULL;
+ goto destroy_ks;
+ }
+
+ ks->decrypt = ovpn_aead_init("decrypt", alg_name,
+ kc->decrypt.cipher_key,
+ kc->decrypt.cipher_key_size);
+ if (IS_ERR(ks->decrypt)) {
+ ret = PTR_ERR(ks->decrypt);
+ ks->decrypt = NULL;
+ goto destroy_ks;
+ }
+
+ memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail,
+ OVPN_NONCE_TAIL_SIZE);
+ memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail,
+ OVPN_NONCE_TAIL_SIZE);
+
+ /* init packet ID generation/validation */
+ ovpn_pktid_xmit_init(&ks->pid_xmit);
+ ovpn_pktid_recv_init(&ks->pid_recv);
+
+ return ks;
+
+destroy_ks:
+ ovpn_aead_crypto_key_slot_destroy(ks);
+ return ERR_PTR(ret);
+}
+
+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks)
+{
+ const char *alg_name;
+
+ if (!ks->encrypt)
+ return OVPN_CIPHER_ALG_NONE;
+
+ alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt));
+
+ if (!strcmp(alg_name, ALG_NAME_AES))
+ return OVPN_CIPHER_ALG_AES_GCM;
+ else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY))
+ return OVPN_CIPHER_ALG_CHACHA20_POLY1305;
+ else
+ return OVPN_CIPHER_ALG_NONE;
+}
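A note on the nonce handling in ovpn_aead_encrypt()/ovpn_aead_decrypt() above: the 12-byte AEAD nonce is the 4-byte packet ID that travels on the wire followed by an 8-byte per-key nonce tail that never leaves the host, so only a third of the nonce is actually transmitted. A standalone sketch of that layout (constants local to the illustration):

	#include <linux/string.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	#define SKETCH_NONCE_WIRE	4	/* packet ID, sent on the wire */
	#define SKETCH_NONCE_TAIL	8	/* per-key secret, kept local */

	static void sketch_build_nonce(u8 *nonce, u32 pktid, const u8 *tail)
	{
		__be32 wire_id = cpu_to_be32(pktid);

		memcpy(nonce, &wire_id, SKETCH_NONCE_WIRE);
		memcpy(nonce + SKETCH_NONCE_WIRE, tail, SKETCH_NONCE_TAIL);
	}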
diff --git a/drivers/net/ovpn/crypto_aead.h b/drivers/net/ovpn/crypto_aead.h
new file mode 100644
index 000000000000..65a2ff307898
--- /dev/null
+++ b/drivers/net/ovpn/crypto_aead.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNAEAD_H_
+#define _NET_OVPN_OVPNAEAD_H_
+
+#include "crypto.h"
+
+#include <asm/types.h>
+#include <linux/skbuff.h>
+
+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb);
+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
+ struct sk_buff *skb);
+
+struct ovpn_crypto_key_slot *
+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc);
+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks);
+
+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks);
+
+#endif /* _NET_OVPN_OVPNAEAD_H_ */
diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
new file mode 100644
index 000000000000..10d8afecec55
--- /dev/null
+++ b/drivers/net/ovpn/io.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <crypto/aead.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/gro_cells.h>
+#include <net/gso.h>
+#include <net/ip.h>
+
+#include "ovpnpriv.h"
+#include "peer.h"
+#include "io.h"
+#include "bind.h"
+#include "crypto.h"
+#include "crypto_aead.h"
+#include "netlink.h"
+#include "proto.h"
+#include "tcp.h"
+#include "udp.h"
+#include "skb.h"
+#include "socket.h"
+
+const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = {
+ 0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb,
+ 0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48
+};
+
+/**
+ * ovpn_is_keepalive - check if skb contains a keepalive message
+ * @skb: packet to check
+ *
+ * Assumes that the first byte of skb->data is defined.
+ *
+ * Return: true if skb contains a keepalive or false otherwise
+ */
+static bool ovpn_is_keepalive(struct sk_buff *skb)
+{
+ if (*skb->data != ovpn_keepalive_message[0])
+ return false;
+
+ if (skb->len != OVPN_KEEPALIVE_SIZE)
+ return false;
+
+ if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
+ return false;
+
+ return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
+}
+
+/* Called after decrypt to write the IP packet to the device.
+ * This method is expected to manage/free the skb.
+ */
+static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ unsigned int pkt_len;
+ int ret;
+
+ /* we can't guarantee the packet wasn't corrupted before entering the
+ * VPN, therefore we give other layers a chance to check that
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* skb hash for transport packet no longer valid after decapsulation */
+ skb_clear_hash(skb);
+
+ /* post-decrypt scrub -- prepare to inject encapsulated packet onto the
+ * interface, based on __skb_tunnel_rx() in dst.h
+ */
+ skb->dev = peer->ovpn->dev;
+ skb_set_queue_mapping(skb, 0);
+ skb_scrub_packet(skb, true);
+
+ /* network header reset in ovpn_decrypt_post() */
+ skb_reset_transport_header(skb);
+ skb_reset_inner_headers(skb);
+
+ /* cause packet to be "received" by the interface */
+ pkt_len = skb->len;
+ ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
+ if (likely(ret == NET_RX_SUCCESS)) {
+ /* update RX stats with the size of decrypted packet */
+ ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
+ dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
+ }
+}
+
+void ovpn_decrypt_post(void *data, int ret)
+{
+ struct ovpn_crypto_key_slot *ks;
+ unsigned int payload_offset = 0;
+ struct sk_buff *skb = data;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ __be16 proto;
+ __be32 *pid;
+
+ /* crypto is happening asynchronously. this function will be called
+ * again later by the crypto callback with a proper return code
+ */
+ if (unlikely(ret == -EINPROGRESS))
+ return;
+
+ payload_offset = ovpn_skb_cb(skb)->payload_offset;
+ ks = ovpn_skb_cb(skb)->ks;
+ peer = ovpn_skb_cb(skb)->peer;
+
+ /* crypto is done, cleanup skb CB and its members */
+ kfree(ovpn_skb_cb(skb)->iv);
+ kfree(ovpn_skb_cb(skb)->sg);
+ aead_request_free(ovpn_skb_cb(skb)->req);
+
+ if (unlikely(ret < 0))
+ goto drop;
+
+ /* PID sits after the op */
+ pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
+ ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
+ if (unlikely(ret < 0)) {
+ net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
+ netdev_name(peer->ovpn->dev), peer->id,
+ ret);
+ goto drop;
+ }
+
+ /* keep track of last received authenticated packet for keepalive */
+ WRITE_ONCE(peer->last_recv, ktime_get_real_seconds());
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (sock && sock->sock->sk->sk_protocol == IPPROTO_UDP)
+ /* check if this peer changed local or remote endpoint */
+ ovpn_peer_endpoints_update(peer, skb);
+ rcu_read_unlock();
+
+ /* point to encapsulated IP packet */
+ __skb_pull(skb, payload_offset);
+
+	/* check if this is a valid data packet that has to be delivered to the
+ * ovpn interface
+ */
+ skb_reset_network_header(skb);
+ proto = ovpn_ip_check_protocol(skb);
+ if (unlikely(!proto)) {
+ /* check if null packet */
+ if (unlikely(!pskb_may_pull(skb, 1))) {
+ net_info_ratelimited("%s: NULL packet received from peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id);
+ goto drop;
+ }
+
+ if (ovpn_is_keepalive(skb)) {
+ net_dbg_ratelimited("%s: ping received from peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id);
+ /* we drop the packet, but this is not a failure */
+ consume_skb(skb);
+ goto drop_nocount;
+ }
+
+ net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto drop;
+ }
+ skb->protocol = proto;
+
+ /* perform Reverse Path Filtering (RPF) */
+ if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
+ if (skb->protocol == htons(ETH_P_IPV6))
+ net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &ipv6_hdr(skb)->saddr);
+ else
+ net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &ip_hdr(skb)->saddr);
+ goto drop;
+ }
+
+ ovpn_netdev_write(peer, skb);
+ /* skb is passed to upper layer - don't free it */
+ skb = NULL;
+drop:
+ if (unlikely(skb))
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+drop_nocount:
+ if (likely(peer))
+ ovpn_peer_put(peer);
+ if (likely(ks))
+ ovpn_crypto_key_slot_put(ks);
+}
+
+/* RX path entry point: decrypt packet and forward it to the device */
+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct ovpn_crypto_key_slot *ks;
+ u8 key_id;
+
+ ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len);
+
+ /* get the key slot matching the key ID in the received packet */
+ key_id = ovpn_key_id_from_skb(skb);
+ ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
+ if (unlikely(!ks)) {
+ net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
+ netdev_name(peer->ovpn->dev), peer->id,
+ key_id);
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
+ ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
+}
+
+void ovpn_encrypt_post(void *data, int ret)
+{
+ struct ovpn_crypto_key_slot *ks;
+ struct sk_buff *skb = data;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ unsigned int orig_len;
+
+ /* encryption is happening asynchronously. This function will be
+ * called later by the crypto callback with a proper return value
+ */
+ if (unlikely(ret == -EINPROGRESS))
+ return;
+
+ ks = ovpn_skb_cb(skb)->ks;
+ peer = ovpn_skb_cb(skb)->peer;
+
+ /* crypto is done, cleanup skb CB and its members */
+ kfree(ovpn_skb_cb(skb)->iv);
+ kfree(ovpn_skb_cb(skb)->sg);
+ aead_request_free(ovpn_skb_cb(skb)->req);
+
+ if (unlikely(ret == -ERANGE)) {
+ /* we ran out of IVs and we must kill the key as it can't be
+	 * used anymore
+ */
+ netdev_warn(peer->ovpn->dev,
+ "killing key %u for peer %u\n", ks->key_id,
+ peer->id);
+ if (ovpn_crypto_kill_key(&peer->crypto, ks->key_id))
+ /* let userspace know so that a new key must be negotiated */
+ ovpn_nl_key_swap_notify(peer, ks->key_id);
+
+ goto err;
+ }
+
+ if (unlikely(ret < 0))
+ goto err;
+
+ skb_mark_not_on_list(skb);
+ orig_len = skb->len;
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (unlikely(!sock))
+ goto err_unlock;
+
+ switch (sock->sock->sk->sk_protocol) {
+ case IPPROTO_UDP:
+ ovpn_udp_send_skb(peer, sock->sock, skb);
+ break;
+ case IPPROTO_TCP:
+ ovpn_tcp_send_skb(peer, sock->sock, skb);
+ break;
+ default:
+ /* no transport configured yet */
+ goto err_unlock;
+ }
+
+ ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
+ /* keep track of last sent packet for keepalive */
+ WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
+ /* skb passed down the stack - don't free it */
+ skb = NULL;
+err_unlock:
+ rcu_read_unlock();
+err:
+ if (unlikely(skb))
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ if (likely(peer))
+ ovpn_peer_put(peer);
+ if (likely(ks))
+ ovpn_crypto_key_slot_put(ks);
+ kfree_skb(skb);
+}
+
+static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct ovpn_crypto_key_slot *ks;
+
+ /* get primary key to be used for encrypting data */
+ ks = ovpn_crypto_key_slot_primary(&peer->crypto);
+ if (unlikely(!ks))
+ return false;
+
+ /* take a reference to the peer because the crypto code may run async.
+ * ovpn_encrypt_post() will release it upon completion
+ */
+ if (unlikely(!ovpn_peer_hold(peer))) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ ovpn_crypto_key_slot_put(ks);
+ return false;
+ }
+
+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
+ ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb));
+ return true;
+}
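+
+/* A sketch of the control flow above (holds for both sync and async
+ * crypto backends):
+ *
+ *   ovpn_encrypt_one()
+ *     -> ovpn_aead_encrypt()          may return -EINPROGRESS if the
+ *                                     crypto backend runs asynchronously
+ *     -> ovpn_encrypt_post(skb, ret)  no-op on -EINPROGRESS; the crypto
+ *                                     callback re-invokes it later with
+ *                                     the final result
+ */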
+
+/* send skb to connected peer, if any */
+static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer)
+{
+ struct sk_buff *curr, *next;
+
+ /* this might be a GSO-segmented skb list: process each skb
+ * independently
+ */
+ skb_list_walk_safe(skb, curr, next) {
+ if (unlikely(!ovpn_encrypt_one(peer, curr))) {
+ dev_dstats_tx_dropped(ovpn->dev);
+ kfree_skb(curr);
+ }
+ }
+
+ ovpn_peer_put(peer);
+}
+
+/* Send user data to the network
+ */
+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ struct sk_buff *segments, *curr, *next;
+ struct sk_buff_head skb_list;
+ struct ovpn_peer *peer;
+ __be16 proto;
+ int ret;
+
+ /* reset netfilter state */
+ nf_reset_ct(skb);
+
+ /* verify IP header size in network packet */
+ proto = ovpn_ip_check_protocol(skb);
+ if (unlikely(!proto || skb->protocol != proto))
+ goto drop;
+
+ if (skb_is_gso(skb)) {
+ segments = skb_gso_segment(skb, 0);
+ if (IS_ERR(segments)) {
+ ret = PTR_ERR(segments);
+ net_err_ratelimited("%s: cannot segment payload packet: %d\n",
+ netdev_name(dev), ret);
+ goto drop;
+ }
+
+ consume_skb(skb);
+ skb = segments;
+ }
+
+ /* from this moment on, "skb" might be a list */
+
+ __skb_queue_head_init(&skb_list);
+ skb_list_walk_safe(skb, curr, next) {
+ skb_mark_not_on_list(curr);
+
+ curr = skb_share_check(curr, GFP_ATOMIC);
+ if (unlikely(!curr)) {
+ net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
+ netdev_name(dev));
+ dev_dstats_tx_dropped(ovpn->dev);
+ continue;
+ }
+
+ __skb_queue_tail(&skb_list, curr);
+ }
+ skb_list.prev->next = NULL;
+
+ /* retrieve peer serving the destination IP of this packet */
+ peer = ovpn_peer_get_by_dst(ovpn, skb);
+ if (unlikely(!peer)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ net_dbg_ratelimited("%s: no peer to send data to dst=%pI4\n",
+ netdev_name(ovpn->dev),
+ &ip_hdr(skb)->daddr);
+ break;
+ case htons(ETH_P_IPV6):
+ net_dbg_ratelimited("%s: no peer to send data to dst=%pI6c\n",
+ netdev_name(ovpn->dev),
+ &ipv6_hdr(skb)->daddr);
+ break;
+ }
+ goto drop;
+ }
+ /* dst was needed for peer selection - it can now be dropped */
+ skb_dst_drop(skb);
+
+ ovpn_peer_stats_increment_tx(&peer->vpn_stats, skb->len);
+ ovpn_send(ovpn, skb_list.next, peer);
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_dstats_tx_dropped(ovpn->dev);
+ skb_tx_error(skb);
+ kfree_skb_list(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer
+ * @peer: peer to send the message to
+ * @data: message content
+ * @len: message length
+ *
+ * Assumes that the caller holds a reference to the peer; that reference
+ * is then consumed by ovpn_send()
+ */
+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
+ const unsigned int len)
+{
+ struct ovpn_priv *ovpn;
+ struct sk_buff *skb;
+
+ ovpn = peer->ovpn;
+ if (unlikely(!ovpn)) {
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ skb = alloc_skb(256 + len, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ ovpn_peer_put(peer);
+ return;
+ }
+
+ skb_reserve(skb, 128);
+ skb->priority = TC_PRIO_BESTEFFORT;
+ __skb_put_data(skb, data, len);
+
+ ovpn_send(ovpn, skb, peer);
+}
diff --git a/drivers/net/ovpn/io.h b/drivers/net/ovpn/io.h
new file mode 100644
index 000000000000..db9e10f9077c
--- /dev/null
+++ b/drivers/net/ovpn/io.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPN_H_
+#define _NET_OVPN_OVPN_H_
+
+/* DATA_V2 header size with AEAD encryption */
+#define OVPN_HEAD_ROOM (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE + \
+ 16 /* AEAD TAG length */ + \
+ max(sizeof(struct udphdr), sizeof(struct tcphdr)) +\
+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)))
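+
+/* Worked example of the worst case above (the opcode and wire-nonce
+ * sizes live in proto.h; 4 bytes each is an assumption made only for
+ * this illustration): 4 (opcode) + 4 (wire nonce) + 16 (AEAD tag) +
+ * 20 (TCP header, larger than UDP's 8) + 40 (IPv6 header, larger than
+ * IPv4's 20) = 84 bytes of headroom reserved per packet. The max()
+ * expressions resolve at build time, so this is a compile-time constant.
+ */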
+
+/* max padding required by encryption */
+#define OVPN_MAX_PADDING 16
+
+#define OVPN_KEEPALIVE_SIZE 16
+extern const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE];
+
+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb);
+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
+ const unsigned int len);
+
+void ovpn_encrypt_post(void *data, int ret);
+void ovpn_decrypt_post(void *data, int ret);
+
+#endif /* _NET_OVPN_OVPN_H_ */
diff --git a/drivers/net/ovpn/main.c b/drivers/net/ovpn/main.c
new file mode 100644
index 000000000000..1bb1afe766a4
--- /dev/null
+++ b/drivers/net/ovpn/main.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#include <linux/ethtool.h>
+#include <linux/genetlink.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/gro_cells.h>
+#include <net/ip.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/if_arp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "netlink.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "tcp.h"
+#include "udp.h"
+
+static void ovpn_priv_free(struct net_device *net)
+{
+ struct ovpn_priv *ovpn = netdev_priv(net);
+
+ kfree(ovpn->peers);
+}
+
+static int ovpn_mp_alloc(struct ovpn_priv *ovpn)
+{
+ struct in_device *dev_v4;
+ int i;
+
+ if (ovpn->mode != OVPN_MODE_MP)
+ return 0;
+
+ dev_v4 = __in_dev_get_rtnl(ovpn->dev);
+ if (dev_v4) {
+ /* disable redirects as Linux gets confused by ovpn
+ * handling same-LAN routing.
+ * This happens because a multipeer interface is used as
+		 * a relay point between hosts in the same subnet, while
+		 * in a classic LAN this would not be needed because the
+		 * two hosts could talk directly.
+ */
+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
+ IPV4_DEVCONF_ALL(dev_net(ovpn->dev), SEND_REDIRECTS) = false;
+ }
+
+	/* the peer container is fairly large, so we allocate it only in
+	 * MP mode
+ */
+ ovpn->peers = kzalloc(sizeof(*ovpn->peers), GFP_KERNEL);
+ if (!ovpn->peers)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ovpn->peers->by_id); i++) {
+ INIT_HLIST_HEAD(&ovpn->peers->by_id[i]);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr4[i], i);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr6[i], i);
+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_transp_addr[i], i);
+ }
+
+ return 0;
+}
+
+static int ovpn_net_init(struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ int err = gro_cells_init(&ovpn->gro_cells, dev);
+
+ if (err < 0)
+ return err;
+
+ err = ovpn_mp_alloc(ovpn);
+ if (err < 0) {
+ gro_cells_destroy(&ovpn->gro_cells);
+ return err;
+ }
+
+ return 0;
+}
+
+static void ovpn_net_uninit(struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ gro_cells_destroy(&ovpn->gro_cells);
+}
+
+static const struct net_device_ops ovpn_netdev_ops = {
+ .ndo_init = ovpn_net_init,
+ .ndo_uninit = ovpn_net_uninit,
+ .ndo_start_xmit = ovpn_net_xmit,
+};
+
+static const struct device_type ovpn_type = {
+ .name = OVPN_FAMILY_NAME,
+};
+
+static const struct nla_policy ovpn_policy[IFLA_OVPN_MAX + 1] = {
+ [IFLA_OVPN_MODE] = NLA_POLICY_RANGE(NLA_U8, OVPN_MODE_P2P,
+ OVPN_MODE_MP),
+};
+
+/**
+ * ovpn_dev_is_valid - check if the netdevice is of type 'ovpn'
+ * @dev: the interface to check
+ *
+ * Return: whether the netdevice is of type 'ovpn'
+ */
+bool ovpn_dev_is_valid(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ovpn_netdev_ops;
+}
+
+static void ovpn_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "ovpn", sizeof(info->driver));
+ strscpy(info->bus_info, "ovpn", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops ovpn_ethtool_ops = {
+ .get_drvinfo = ovpn_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static void ovpn_setup(struct net_device *dev)
+{
+ netdev_features_t feat = NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA;
+
+ dev->needs_free_netdev = true;
+
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+
+ dev->ethtool_ops = &ovpn_ethtool_ops;
+ dev->netdev_ops = &ovpn_netdev_ops;
+
+ dev->priv_destructor = ovpn_priv_free;
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->mtu = ETH_DATA_LEN - OVPN_HEAD_ROOM;
+ dev->min_mtu = IPV4_MIN_MTU;
+ dev->max_mtu = IP_MAX_MTU - OVPN_HEAD_ROOM;
+
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ /* when routing packets to a LAN behind a client, we rely on the
+ * route entry that originally brought the packet into ovpn, so
+ * don't release it
+ */
+ netif_keep_dst(dev);
+
+ dev->lltx = true;
+ dev->features |= feat;
+ dev->hw_features |= feat;
+ dev->hw_enc_features |= feat;
+
+ dev->needed_headroom = ALIGN(OVPN_HEAD_ROOM, 4);
+ dev->needed_tailroom = OVPN_MAX_PADDING;
+
+ SET_NETDEV_DEVTYPE(dev, &ovpn_type);
+}
+
+static int ovpn_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ enum ovpn_mode mode = OVPN_MODE_P2P;
+
+ if (data && data[IFLA_OVPN_MODE]) {
+ mode = nla_get_u8(data[IFLA_OVPN_MODE]);
+ netdev_dbg(dev, "setting device mode: %u\n", mode);
+ }
+
+ ovpn->dev = dev;
+ ovpn->mode = mode;
+ spin_lock_init(&ovpn->lock);
+ INIT_DELAYED_WORK(&ovpn->keepalive_work, ovpn_peer_keepalive_work);
+
+	/* Set carrier explicitly after registration; this way the state is
+	 * clearly defined.
+	 *
+	 * For MP interfaces the carrier is always kept on.
+ *
+ * Carrier for P2P interfaces is initially off and it is then
+ * switched on and off when the remote peer is added or deleted.
+ */
+ if (ovpn->mode == OVPN_MODE_MP)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ return register_netdevice(dev);
+}
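+
+/* Illustrative only (assumes an iproute2 build that understands the
+ * "ovpn" rtnetlink kind; the invocation is hypothetical, not mandated
+ * by this patch):
+ *
+ *   ip link add ovpn0 type ovpn mode p2p
+ *
+ * which reaches ovpn_newlink() with IFLA_OVPN_MODE set to OVPN_MODE_P2P.
+ */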
+
+static void ovpn_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ cancel_delayed_work_sync(&ovpn->keepalive_work);
+ ovpn_peers_free(ovpn, NULL, OVPN_DEL_PEER_REASON_TEARDOWN);
+ unregister_netdevice_queue(dev, head);
+}
+
+static int ovpn_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ovpn_priv *ovpn = netdev_priv(dev);
+
+ if (nla_put_u8(skb, IFLA_OVPN_MODE, ovpn->mode))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static struct rtnl_link_ops ovpn_link_ops = {
+ .kind = "ovpn",
+ .netns_refund = false,
+ .priv_size = sizeof(struct ovpn_priv),
+ .setup = ovpn_setup,
+ .policy = ovpn_policy,
+ .maxtype = IFLA_OVPN_MAX,
+ .newlink = ovpn_newlink,
+ .dellink = ovpn_dellink,
+ .fill_info = ovpn_fill_info,
+};
+
+static int __init ovpn_init(void)
+{
+ int err = rtnl_link_register(&ovpn_link_ops);
+
+ if (err) {
+ pr_err("ovpn: can't register rtnl link ops: %d\n", err);
+ return err;
+ }
+
+ err = ovpn_nl_register();
+ if (err) {
+ pr_err("ovpn: can't register netlink family: %d\n", err);
+ goto unreg_rtnl;
+ }
+
+ ovpn_tcp_init();
+
+ return 0;
+
+unreg_rtnl:
+ rtnl_link_unregister(&ovpn_link_ops);
+ return err;
+}
+
+static __exit void ovpn_cleanup(void)
+{
+ ovpn_nl_unregister();
+ rtnl_link_unregister(&ovpn_link_ops);
+
+ rcu_barrier();
+}
+
+module_init(ovpn_init);
+module_exit(ovpn_cleanup);
+
+MODULE_DESCRIPTION("OpenVPN data channel offload (ovpn)");
+MODULE_AUTHOR("Antonio Quartulli <antonio@openvpn.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ovpn/main.h b/drivers/net/ovpn/main.h
new file mode 100644
index 000000000000..017cd0100765
--- /dev/null
+++ b/drivers/net/ovpn/main.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_MAIN_H_
+#define _NET_OVPN_MAIN_H_
+
+bool ovpn_dev_is_valid(const struct net_device *dev);
+
+#endif /* _NET_OVPN_MAIN_H_ */
diff --git a/drivers/net/ovpn/netlink-gen.c b/drivers/net/ovpn/netlink-gen.c
new file mode 100644
index 000000000000..58e1a4342378
--- /dev/null
+++ b/drivers/net/ovpn/netlink-gen.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink-gen.h"
+
+#include <uapi/linux/ovpn.h>
+
+/* Integer value ranges */
+static const struct netlink_range_validation ovpn_a_peer_id_range = {
+ .max = 16777215ULL,
+};
+
+static const struct netlink_range_validation ovpn_a_keyconf_peer_id_range = {
+ .max = 16777215ULL,
+};
+
+/* Common nested types */
+const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1),
+ [OVPN_A_KEYCONF_KEY_ID] = NLA_POLICY_MAX(NLA_U32, 7),
+ [OVPN_A_KEYCONF_CIPHER_ALG] = NLA_POLICY_MAX(NLA_U32, 2),
+ [OVPN_A_KEYCONF_ENCRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
+ [OVPN_A_KEYCONF_DECRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
+};
+
+const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1] = {
+ [OVPN_A_KEYDIR_CIPHER_KEY] = NLA_POLICY_MAX_LEN(256),
+ [OVPN_A_KEYDIR_NONCE_TAIL] = NLA_POLICY_EXACT_LEN(OVPN_NONCE_TAIL_SIZE),
+};
+
+const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, },
+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_SOCKET] = { .type = NLA_U32, },
+ [OVPN_A_PEER_SOCKET_NETNSID] = { .type = NLA_S32, },
+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, },
+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, },
+ [OVPN_A_PEER_DEL_REASON] = NLA_POLICY_MAX(NLA_U32, 4),
+ [OVPN_A_PEER_VPN_RX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_TX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_RX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_VPN_TX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_RX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_TX_BYTES] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_RX_PACKETS] = { .type = NLA_UINT, },
+ [OVPN_A_PEER_LINK_TX_PACKETS] = { .type = NLA_UINT, },
+};
+
+/* OVPN_CMD_PEER_NEW - do */
+static const struct nla_policy ovpn_peer_new_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_SET - do */
+static const struct nla_policy ovpn_peer_set_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_GET - do */
+static const struct nla_policy ovpn_peer_get_do_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_PEER_GET - dump */
+static const struct nla_policy ovpn_peer_get_dump_nl_policy[OVPN_A_IFINDEX + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+};
+
+/* OVPN_CMD_PEER_DEL - do */
+static const struct nla_policy ovpn_peer_del_nl_policy[OVPN_A_PEER + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+};
+
+/* OVPN_CMD_KEY_NEW - do */
+static const struct nla_policy ovpn_key_new_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_GET - do */
+static const struct nla_policy ovpn_key_get_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_SWAP - do */
+static const struct nla_policy ovpn_key_swap_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* OVPN_CMD_KEY_DEL - do */
+static const struct nla_policy ovpn_key_del_nl_policy[OVPN_A_KEYCONF + 1] = {
+ [OVPN_A_IFINDEX] = { .type = NLA_U32, },
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+};
+
+/* Ops table for ovpn */
+static const struct genl_split_ops ovpn_nl_ops[] = {
+ {
+ .cmd = OVPN_CMD_PEER_NEW,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_new_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_new_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_SET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_set_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_set_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_GET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_get_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_get_do_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_GET,
+ .dumpit = ovpn_nl_peer_get_dumpit,
+ .policy = ovpn_peer_get_dump_nl_policy,
+ .maxattr = OVPN_A_IFINDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = OVPN_CMD_PEER_DEL,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_peer_del_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_peer_del_nl_policy,
+ .maxattr = OVPN_A_PEER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_NEW,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_new_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_new_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_GET,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_get_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_get_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_SWAP,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_swap_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_swap_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = OVPN_CMD_KEY_DEL,
+ .pre_doit = ovpn_nl_pre_doit,
+ .doit = ovpn_nl_key_del_doit,
+ .post_doit = ovpn_nl_post_doit,
+ .policy = ovpn_key_del_nl_policy,
+ .maxattr = OVPN_A_KEYCONF,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+};
+
+static const struct genl_multicast_group ovpn_nl_mcgrps[] = {
+ [OVPN_NLGRP_PEERS] = { "peers", },
+};
+
+struct genl_family ovpn_nl_family __ro_after_init = {
+ .name = OVPN_FAMILY_NAME,
+ .version = OVPN_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = ovpn_nl_ops,
+ .n_split_ops = ARRAY_SIZE(ovpn_nl_ops),
+ .mcgrps = ovpn_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ovpn_nl_mcgrps),
+};
diff --git a/drivers/net/ovpn/netlink-gen.h b/drivers/net/ovpn/netlink-gen.h
new file mode 100644
index 000000000000..66a4e4a0a055
--- /dev/null
+++ b/drivers/net/ovpn/netlink-gen.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/ovpn.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_OVPN_GEN_H
+#define _LINUX_OVPN_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/ovpn.h>
+
+/* Common nested types */
+extern const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1];
+extern const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1];
+extern const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1];
+
+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+void
+ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+
+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info);
+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info);
+
+enum {
+ OVPN_NLGRP_PEERS,
+};
+
+extern struct genl_family ovpn_nl_family;
+
+#endif /* _LINUX_OVPN_GEN_H */
diff --git a/drivers/net/ovpn/netlink.c b/drivers/net/ovpn/netlink.c
new file mode 100644
index 000000000000..bea03913bfb1
--- /dev/null
+++ b/drivers/net/ovpn/netlink.c
@@ -0,0 +1,1258 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/ovpn.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "netlink.h"
+#include "netlink-gen.h"
+#include "bind.h"
+#include "crypto.h"
+#include "peer.h"
+#include "socket.h"
+
+MODULE_ALIAS_GENL_FAMILY(OVPN_FAMILY_NAME);
+
+/**
+ * ovpn_get_dev_from_attrs - retrieve the ovpn private data from the netdevice
+ * a netlink message is targeting
+ * @net: network namespace where to look for the interface
+ * @info: generic netlink info from the user request
+ * @tracker: tracker object to be used for the netdev reference acquisition
+ *
+ * Return: the ovpn private data, if found, or an error otherwise
+ */
+static struct ovpn_priv *
+ovpn_get_dev_from_attrs(struct net *net, const struct genl_info *info,
+ netdevice_tracker *tracker)
+{
+ struct ovpn_priv *ovpn;
+ struct net_device *dev;
+ int ifindex;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_IFINDEX))
+ return ERR_PTR(-EINVAL);
+
+ ifindex = nla_get_u32(info->attrs[OVPN_A_IFINDEX]);
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, ifindex);
+ if (!dev) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "ifindex does not match any interface");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!ovpn_dev_is_valid(dev)) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "specified interface is not ovpn");
+ NL_SET_BAD_ATTR(info->extack, info->attrs[OVPN_A_IFINDEX]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ovpn = netdev_priv(dev);
+ netdev_hold(dev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return ovpn;
+}
+
+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1];
+ struct ovpn_priv *ovpn = ovpn_get_dev_from_attrs(genl_info_net(info),
+ info, tracker);
+
+ if (IS_ERR(ovpn))
+ return PTR_ERR(ovpn);
+
+ info->user_ptr[0] = ovpn;
+
+ return 0;
+}
+
+void ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+
+ if (ovpn)
+ netdev_put(ovpn->dev, tracker);
+}
+
+static bool ovpn_nl_attr_sockaddr_remote(struct nlattr **attrs,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ struct in6_addr *in6;
+ __be16 port = 0;
+ __be32 *in;
+
+ ss->ss_family = AF_UNSPEC;
+
+ if (attrs[OVPN_A_PEER_REMOTE_PORT])
+ port = nla_get_be16(attrs[OVPN_A_PEER_REMOTE_PORT]);
+
+ if (attrs[OVPN_A_PEER_REMOTE_IPV4]) {
+ ss->ss_family = AF_INET;
+ in = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV4]);
+ } else if (attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ ss->ss_family = AF_INET6;
+ in6 = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV6]);
+ } else {
+ return false;
+ }
+
+ switch (ss->ss_family) {
+ case AF_INET6:
+		/* If this is a regular IPv6 address, just break and move on;
+		 * otherwise switch to AF_INET and extract the IPv4 accordingly
+ */
+ if (!ipv6_addr_v4mapped(in6)) {
+ sin6 = (struct sockaddr_in6 *)ss;
+ sin6->sin6_port = port;
+ memcpy(&sin6->sin6_addr, in6, sizeof(*in6));
+ break;
+ }
+
+ /* v4-mapped-v6 address */
+ ss->ss_family = AF_INET;
+ in = &in6->s6_addr32[3];
+ fallthrough;
+ case AF_INET:
+ sin = (struct sockaddr_in *)ss;
+ sin->sin_port = port;
+ sin->sin_addr.s_addr = *in;
+ break;
+ }
+
+ return true;
+}
+
+static u8 *ovpn_nl_attr_local_ip(struct nlattr **attrs)
+{
+ u8 *addr6;
+
+ if (!attrs[OVPN_A_PEER_LOCAL_IPV4] && !attrs[OVPN_A_PEER_LOCAL_IPV6])
+ return NULL;
+
+ if (attrs[OVPN_A_PEER_LOCAL_IPV4])
+ return nla_data(attrs[OVPN_A_PEER_LOCAL_IPV4]);
+
+ addr6 = nla_data(attrs[OVPN_A_PEER_LOCAL_IPV6]);
+	/* if this is an IPv4-mapped IPv6 address, extract the actual
+	 * v4 address from the last 4 bytes
+ */
+ if (ipv6_addr_v4mapped((struct in6_addr *)addr6))
+ return addr6 + 12;
+
+ return addr6;
+}
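+
+/* Example of the v4-mapped handling above: a local address supplied as
+ * ::ffff:198.51.100.7 is treated as the IPv4 address 198.51.100.7, i.e.
+ * the last 4 bytes of the 16-byte IPv6 container (address taken from a
+ * documentation range purely for illustration).
+ */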
+
+static sa_family_t ovpn_nl_family_get(struct nlattr *addr4,
+ struct nlattr *addr6)
+{
+ if (addr4)
+ return AF_INET;
+
+ if (addr6) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)nla_data(addr6)))
+ return AF_INET;
+ return AF_INET6;
+ }
+
+ return AF_UNSPEC;
+}
+
+static int ovpn_nl_peer_precheck(struct ovpn_priv *ovpn,
+ struct genl_info *info,
+ struct nlattr **attrs)
+{
+ sa_family_t local_fam, remote_fam;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ if (attrs[OVPN_A_PEER_REMOTE_IPV4] && attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify both remote IPv4 or IPv6 address");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV6] && attrs[OVPN_A_PEER_REMOTE_PORT]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify remote port without IP address");
+ return -EINVAL;
+ }
+
+ if ((attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6]) &&
+ !attrs[OVPN_A_PEER_REMOTE_PORT]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify remote IP address without port");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ attrs[OVPN_A_PEER_LOCAL_IPV4]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify local IPv4 address without remote");
+ return -EINVAL;
+ }
+
+ if (!attrs[OVPN_A_PEER_REMOTE_IPV6] &&
+ attrs[OVPN_A_PEER_LOCAL_IPV6]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify local IPV6 address without remote");
+ return -EINVAL;
+ }
+
+ /* check that local and remote address families are the same even
+ * after parsing v4mapped IPv6 addresses.
+ * (if addresses are not provided, family will be AF_UNSPEC and
+ * the check is skipped)
+ */
+ local_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_LOCAL_IPV4],
+ attrs[OVPN_A_PEER_LOCAL_IPV6]);
+ remote_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_REMOTE_IPV4],
+ attrs[OVPN_A_PEER_REMOTE_IPV6]);
+ if (local_fam != AF_UNSPEC && remote_fam != AF_UNSPEC &&
+ local_fam != remote_fam) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "mismatching local and remote address families");
+ return -EINVAL;
+ }
+
+ if (remote_fam != AF_INET6 && attrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "cannot specify scope id without remote IPv6 address");
+ return -EINVAL;
+ }
+
+ /* VPN IPs are needed only in MP mode for selecting the right peer */
+ if (ovpn->mode == OVPN_MODE_P2P && (attrs[OVPN_A_PEER_VPN_IPV4] ||
+ attrs[OVPN_A_PEER_VPN_IPV6])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected VPN IP in P2P mode");
+ return -EINVAL;
+ }
+
+ if ((attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ !attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) ||
+ (!attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "keepalive interval and timeout are required together");
+ return -EINVAL;
+ }
+
+ return 0;
+}
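+
+/* For illustration, an attribute set that passes the checks above (and
+ * the additional checks in ovpn_nl_peer_new_doit()) for a UDP peer in
+ * MP mode; all values are made up:
+ *
+ *   OVPN_A_PEER_ID          = 3
+ *   OVPN_A_PEER_SOCKET      = <fd of the UDP socket>
+ *   OVPN_A_PEER_REMOTE_IPV4 = 192.0.2.10
+ *   OVPN_A_PEER_REMOTE_PORT = 1194
+ *   OVPN_A_PEER_VPN_IPV4    = 10.8.0.2
+ */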
+
+/**
+ * ovpn_nl_peer_modify - modify the peer attributes according to the incoming msg
+ * @peer: the peer to modify
+ * @info: generic netlink info from the user request
+ * @attrs: the attributes from the user request
+ *
+ * Return: a negative error code in case of failure, 0 on success, or 1 on
+ *	    success when the VPN IPs were modified (which requires rehashing
+ *	    in MP mode)
+ */
+static int ovpn_nl_peer_modify(struct ovpn_peer *peer, struct genl_info *info,
+ struct nlattr **attrs)
+{
+ struct sockaddr_storage ss = {};
+ void *local_ip = NULL;
+ u32 interv, timeout;
+ bool rehash = false;
+ int ret;
+
+ spin_lock_bh(&peer->lock);
+
+ if (ovpn_nl_attr_sockaddr_remote(attrs, &ss)) {
+ /* we carry the local IP in a generic container.
+ * ovpn_peer_reset_sockaddr() will properly interpret it
+ * based on ss.ss_family
+ */
+ local_ip = ovpn_nl_attr_local_ip(attrs);
+
+ /* set peer sockaddr */
+ ret = ovpn_peer_reset_sockaddr(peer, &ss, local_ip);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot set peer sockaddr: %d",
+ ret);
+ goto err_unlock;
+ }
+ dst_cache_reset(&peer->dst_cache);
+ }
+
+ if (attrs[OVPN_A_PEER_VPN_IPV4]) {
+ rehash = true;
+ peer->vpn_addrs.ipv4.s_addr =
+ nla_get_in_addr(attrs[OVPN_A_PEER_VPN_IPV4]);
+ }
+
+ if (attrs[OVPN_A_PEER_VPN_IPV6]) {
+ rehash = true;
+ peer->vpn_addrs.ipv6 =
+ nla_get_in6_addr(attrs[OVPN_A_PEER_VPN_IPV6]);
+ }
+
+ /* when setting the keepalive, both parameters have to be configured */
+ if (attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) {
+ interv = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]);
+ timeout = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]);
+ ovpn_peer_keepalive_set(peer, interv, timeout);
+ }
+
+ netdev_dbg(peer->ovpn->dev,
+ "modify peer id=%u endpoint=%pIScp VPN-IPv4=%pI4 VPN-IPv6=%pI6c\n",
+ peer->id, &ss,
+ &peer->vpn_addrs.ipv4.s_addr, &peer->vpn_addrs.ipv6);
+
+ spin_unlock_bh(&peer->lock);
+
+ return rehash ? 1 : 0;
+err_unlock:
+ spin_unlock_bh(&peer->lock);
+ return ret;
+}
+
+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_socket *ovpn_sock;
+ struct socket *sock = NULL;
+ struct ovpn_peer *peer;
+ u32 sockfd, peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+ if (ret < 0)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_SOCKET))
+ return -EINVAL;
+
+ /* in MP mode VPN IPs are required for selecting the right peer */
+ if (ovpn->mode == OVPN_MODE_MP && !attrs[OVPN_A_PEER_VPN_IPV4] &&
+ !attrs[OVPN_A_PEER_VPN_IPV6]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "VPN IP must be provided in MP mode");
+ return -EINVAL;
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_new(ovpn, peer_id);
+ if (IS_ERR(peer)) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot create new peer object for peer %u: %ld",
+ peer_id, PTR_ERR(peer));
+ return PTR_ERR(peer);
+ }
+
+ /* lookup the fd in the kernel table and extract the socket object */
+ sockfd = nla_get_u32(attrs[OVPN_A_PEER_SOCKET]);
+ /* sockfd_lookup() increases sock's refcounter */
+ sock = sockfd_lookup(sockfd, &ret);
+ if (!sock) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot lookup peer socket (fd=%u): %d",
+ sockfd, ret);
+ ret = -ENOTSOCK;
+ goto peer_release;
+ }
+
+	/* Only when UDP is used as the transport protocol can the remote
+	 * endpoint be configured, so that ovpn knows where to send
+	 * packets.
+	 */
+ if (sock->sk->sk_protocol == IPPROTO_UDP &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+ !attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "missing remote IP address for UDP socket");
+ sockfd_put(sock);
+ ret = -EINVAL;
+ goto peer_release;
+ }
+
+ /* In case of TCP, the socket is connected to the peer and ovpn
+ * will just send bytes over it, without the need to specify a
+ * destination.
+ */
+ if (sock->sk->sk_protocol == IPPROTO_TCP &&
+ (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected remote IP address with TCP socket");
+ sockfd_put(sock);
+ ret = -EINVAL;
+ goto peer_release;
+ }
+
+ ovpn_sock = ovpn_socket_new(sock, peer);
+ /* at this point we unconditionally drop the reference to the socket:
+ * - in case of error, the socket has to be dropped
+	 * - in case of success, the socket is configured and we let
+ * userspace own the reference, so that the latter can
+ * trigger the final close()
+ */
+ sockfd_put(sock);
+ if (IS_ERR(ovpn_sock)) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot encapsulate socket: %ld",
+ PTR_ERR(ovpn_sock));
+ ret = -ENOTSOCK;
+ goto peer_release;
+ }
+
+ rcu_assign_pointer(peer->sock, ovpn_sock);
+
+ ret = ovpn_nl_peer_modify(peer, info, attrs);
+ if (ret < 0)
+ goto sock_release;
+
+ ret = ovpn_peer_add(ovpn, peer);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot add new peer (id=%u) to hashtable: %d",
+ peer->id, ret);
+ goto sock_release;
+ }
+
+ return 0;
+
+sock_release:
+ ovpn_socket_release(peer);
+peer_release:
+ /* release right away because peer was not yet hashed, thus it is not
+ * used in any context
+ */
+ ovpn_peer_release(peer);
+
+ return ret;
+}
+
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+ if (ret < 0)
+ return ret;
+
+ if (attrs[OVPN_A_PEER_SOCKET]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "socket cannot be modified");
+ return -EINVAL;
+ }
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ /* when using a TCP socket the remote IP is not expected */
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (sock && sock->sock->sk->sk_protocol == IPPROTO_TCP &&
+ (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+ attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+ rcu_read_unlock();
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected remote IP address with TCP socket");
+ ovpn_peer_put(peer);
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ spin_lock_bh(&ovpn->lock);
+ ret = ovpn_nl_peer_modify(peer, info, attrs);
+ if (ret < 0) {
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+ return ret;
+ }
+
+ /* ret == 1 means that VPN IPv4/6 has been modified and rehashing
+ * is required
+ */
+ if (ret > 0)
+ ovpn_peer_hash_vpn_ip(peer);
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+static int ovpn_nl_send_peer(struct sk_buff *skb, const struct genl_info *info,
+ const struct ovpn_peer *peer, u32 portid, u32 seq,
+ int flags)
+{
+ const struct ovpn_bind *bind;
+ struct ovpn_socket *sock;
+ int ret = -EMSGSIZE;
+ struct nlattr *attr;
+ __be16 local_port;
+ void *hdr;
+ int id;
+
+ hdr = genlmsg_put(skb, portid, seq, &ovpn_nl_family, flags,
+ OVPN_CMD_PEER_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ attr = nla_nest_start(skb, OVPN_A_PEER);
+ if (!attr)
+ goto err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (!net_eq(genl_info_net(info), sock_net(sock->sock->sk))) {
+ id = peernet2id_alloc(genl_info_net(info),
+ sock_net(sock->sock->sk),
+ GFP_ATOMIC);
+ if (nla_put_s32(skb, OVPN_A_PEER_SOCKET_NETNSID, id))
+ goto err_unlock;
+ }
+ local_port = inet_sk(sock->sock->sk)->inet_sport;
+ rcu_read_unlock();
+
+ if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id))
+ goto err;
+
+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY))
+ if (nla_put_in_addr(skb, OVPN_A_PEER_VPN_IPV4,
+ peer->vpn_addrs.ipv4.s_addr))
+ goto err;
+
+ if (!ipv6_addr_equal(&peer->vpn_addrs.ipv6, &in6addr_any))
+ if (nla_put_in6_addr(skb, OVPN_A_PEER_VPN_IPV6,
+ &peer->vpn_addrs.ipv6))
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_INTERVAL,
+ peer->keepalive_interval) ||
+ nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_TIMEOUT,
+ peer->keepalive_timeout))
+ goto err;
+
+ rcu_read_lock();
+ bind = rcu_dereference(peer->bind);
+ if (bind) {
+ if (bind->remote.in4.sin_family == AF_INET) {
+ if (nla_put_in_addr(skb, OVPN_A_PEER_REMOTE_IPV4,
+ bind->remote.in4.sin_addr.s_addr) ||
+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT,
+ bind->remote.in4.sin_port) ||
+ nla_put_in_addr(skb, OVPN_A_PEER_LOCAL_IPV4,
+ bind->local.ipv4.s_addr))
+ goto err_unlock;
+ } else if (bind->remote.in4.sin_family == AF_INET6) {
+ if (nla_put_in6_addr(skb, OVPN_A_PEER_REMOTE_IPV6,
+ &bind->remote.in6.sin6_addr) ||
+ nla_put_u32(skb, OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID,
+ bind->remote.in6.sin6_scope_id) ||
+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT,
+ bind->remote.in6.sin6_port) ||
+ nla_put_in6_addr(skb, OVPN_A_PEER_LOCAL_IPV6,
+ &bind->local.ipv6))
+ goto err_unlock;
+ }
+ }
+ rcu_read_unlock();
+
+ if (nla_put_net16(skb, OVPN_A_PEER_LOCAL_PORT, local_port) ||
+ /* VPN RX stats */
+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_BYTES,
+ atomic64_read(&peer->vpn_stats.rx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_PACKETS,
+ atomic64_read(&peer->vpn_stats.rx.packets)) ||
+ /* VPN TX stats */
+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_BYTES,
+ atomic64_read(&peer->vpn_stats.tx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_PACKETS,
+ atomic64_read(&peer->vpn_stats.tx.packets)) ||
+ /* link RX stats */
+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_BYTES,
+ atomic64_read(&peer->link_stats.rx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_PACKETS,
+ atomic64_read(&peer->link_stats.rx.packets)) ||
+ /* link TX stats */
+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_BYTES,
+ atomic64_read(&peer->link_stats.tx.bytes)) ||
+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_PACKETS,
+ atomic64_read(&peer->link_stats.tx.packets)))
+ goto err;
+
+ nla_nest_end(skb, attr);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+err_unlock:
+ rcu_read_unlock();
+err:
+ genlmsg_cancel(skb, hdr);
+ return ret;
+}
+
+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer *peer;
+ struct sk_buff *msg;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ovpn_nl_send_peer(msg, info, peer, info->snd_portid,
+ info->snd_seq, 0);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ goto err;
+ }
+
+ ret = genlmsg_reply(msg, info);
+err:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+ int bkt, last_idx = cb->args[1], dumped = 0;
+ netdevice_tracker tracker;
+ struct ovpn_priv *ovpn;
+ struct ovpn_peer *peer;
+
+ ovpn = ovpn_get_dev_from_attrs(sock_net(cb->skb->sk), info, &tracker);
+ if (IS_ERR(ovpn))
+ return PTR_ERR(ovpn);
+
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ /* if we already dumped a peer it means we are done */
+ if (last_idx)
+ goto out;
+
+ rcu_read_lock();
+ peer = rcu_dereference(ovpn->peer);
+ if (peer) {
+ if (ovpn_nl_send_peer(skb, info, peer,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) == 0)
+ dumped++;
+ }
+ rcu_read_unlock();
+ } else {
+ rcu_read_lock();
+ hash_for_each_rcu(ovpn->peers->by_id, bkt, peer,
+ hash_entry_id) {
+			/* skip peers that were already dumped by
+ * previous invocations
+ */
+ if (last_idx > 0) {
+ last_idx--;
+ continue;
+ }
+
+ if (ovpn_nl_send_peer(skb, info, peer,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI) < 0)
+ break;
+
+ /* count peers being dumped during this invocation */
+ dumped++;
+ }
+ rcu_read_unlock();
+ }
+
+out:
+ netdev_put(ovpn->dev, &tracker);
+
+ /* sum up peers dumped in this message, so that at the next invocation
+	 * we can continue from where we left off
+ */
+ cb->args[1] += dumped;
+ return skb->len;
+}
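+
+/* Illustration of the resume logic above: if a dump of 5000 peers fills
+ * the first skb after 3000 entries, cb->args[1] becomes 3000 and the
+ * next invocation skips that many hash entries before emitting the
+ * remaining 2000 (the numbers are made up for the example).
+ */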
+
+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+ ovpn_peer_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+ OVPN_A_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ netdev_dbg(ovpn->dev, "del peer %u\n", peer->id);
+ ret = ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_USERSPACE);
+ ovpn_peer_put(peer);
+
+ return ret;
+}
+
+static int ovpn_nl_get_key_dir(struct genl_info *info, struct nlattr *key,
+ enum ovpn_cipher_alg cipher,
+ struct ovpn_key_direction *dir)
+{
+ struct nlattr *attrs[OVPN_A_KEYDIR_MAX + 1];
+ int ret;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYDIR_MAX, key,
+ ovpn_keydir_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ switch (cipher) {
+ case OVPN_CIPHER_ALG_AES_GCM:
+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
+ if (NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+ OVPN_A_KEYDIR_CIPHER_KEY) ||
+ NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+ OVPN_A_KEYDIR_NONCE_TAIL))
+ return -EINVAL;
+
+ dir->cipher_key = nla_data(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+ dir->cipher_key_size = nla_len(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+
+		/* These algorithms require a 96-bit nonce, which is
+		 * constructed by combining the 4-byte packet ID with the
+		 * 8-byte nonce tail provided by userspace
+		 */
+ dir->nonce_tail = nla_data(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+ dir->nonce_tail_size = nla_len(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "unsupported cipher");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_key_new_doit - configure a new key for the specified peer
+ * @skb: incoming netlink message
+ * @info: genetlink metadata
+ *
+ * This function allows the user to install a new key in the peer crypto
+ * state.
+ * Each peer has two 'slots', namely 'primary' and 'secondary', where
+ * keys can be installed. The key in the 'primary' slot is used for
+ * encryption, while both keys can be used for decryption by matching the
+ * key ID carried in the incoming packet.
+ *
+ * The user is responsible for rotating keys when necessary. The user
+ * may fetch peer traffic statistics via netlink in order to better
+ * identify the right time to rotate keys.
+ * The renegotiation follows these steps:
+ * 1. a new key is computed by the user and is installed in the 'secondary'
+ * slot
+ * 2. at user discretion (usually after a predetermined time) 'primary' and
+ * 'secondary' contents are swapped and the new key starts being used for
+ * encryption, while the old key is kept around for decryption of late
+ * packets.
+ *
+ * Return: 0 on success or a negative error code otherwise.
+ */
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_peer_key_reset pkr;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_KEY_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_CIPHER_ALG) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_ENCRYPT_DIR) ||
+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_DECRYPT_DIR))
+ return -EINVAL;
+
+ pkr.slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+ pkr.key.key_id = nla_get_u32(attrs[OVPN_A_KEYCONF_KEY_ID]);
+ pkr.key.cipher_alg = nla_get_u32(attrs[OVPN_A_KEYCONF_CIPHER_ALG]);
+
+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_ENCRYPT_DIR],
+ pkr.key.cipher_alg, &pkr.key.encrypt);
+ if (ret < 0)
+ return ret;
+
+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_DECRYPT_DIR],
+ pkr.key.cipher_alg, &pkr.key.decrypt);
+ if (ret < 0)
+ return ret;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to set key for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ret = ovpn_crypto_state_reset(&peer->crypto, &pkr);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot install new key for peer %u",
+ peer_id);
+ goto out;
+ }
+
+ netdev_dbg(ovpn->dev, "new key installed (id=%u) for peer %u\n",
+ pkr.key.key_id, peer_id);
+out:
+ ovpn_peer_put(peer);
+ return ret;
+}
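+
+/* Sketch of the rotation flow described above, as seen from userspace
+ * (a hypothetical sequence shown only to illustrate the slot protocol):
+ *
+ *   1. OVPN_CMD_KEY_NEW with OVPN_A_KEYCONF_SLOT = secondary and a new
+ *      key ID installs the freshly negotiated key
+ *   2. at the chosen rotation time (or upon OVPN_CMD_KEY_SWAP_NTF),
+ *      OVPN_CMD_KEY_SWAP promotes the secondary slot to primary
+ *   3. once late packets carrying the old key ID are no longer expected,
+ *      OVPN_CMD_KEY_DEL removes the now-secondary key
+ */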
+
+static int ovpn_nl_send_key(struct sk_buff *skb, const struct genl_info *info,
+ u32 peer_id, enum ovpn_key_slot slot,
+ const struct ovpn_key_config *keyconf)
+{
+ struct nlattr *attr;
+ void *hdr;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, &ovpn_nl_family,
+ 0, OVPN_CMD_KEY_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ attr = nla_nest_start(skb, OVPN_A_KEYCONF);
+ if (!attr)
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_KEYCONF_PEER_ID, peer_id))
+ goto err;
+
+ if (nla_put_u32(skb, OVPN_A_KEYCONF_SLOT, slot) ||
+ nla_put_u32(skb, OVPN_A_KEYCONF_KEY_ID, keyconf->key_id) ||
+ nla_put_u32(skb, OVPN_A_KEYCONF_CIPHER_ALG, keyconf->cipher_alg))
+ goto err;
+
+ nla_nest_end(skb, attr);
+ genlmsg_end(skb, hdr);
+
+ return 0;
+err:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct ovpn_key_config keyconf = { 0 };
+ enum ovpn_key_slot slot;
+ struct ovpn_peer *peer;
+ struct sk_buff *msg;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot find peer with id %u", peer_id);
+ return -ENOENT;
+ }
+
+ slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+ ret = ovpn_crypto_config_get(&peer->crypto, slot, &keyconf);
+ if (ret < 0) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "cannot extract key from slot %u for peer %u",
+ slot, peer_id);
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ovpn_nl_send_key(msg, info, peer->id, slot, &keyconf);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ goto err;
+ }
+
+ ret = genlmsg_reply(msg, info);
+err:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to swap keys for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ovpn_crypto_key_slots_swap(&peer->crypto);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+ struct ovpn_priv *ovpn = info->user_ptr[0];
+ enum ovpn_key_slot slot;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+ return -EINVAL;
+
+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+ info->attrs[OVPN_A_KEYCONF],
+ ovpn_keyconf_nl_policy, info->extack);
+ if (ret)
+ return ret;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_PEER_ID))
+ return -EINVAL;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+ OVPN_A_KEYCONF_SLOT))
+ return -EINVAL;
+
+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+ slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ if (!peer) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "no peer with id %u to delete key for",
+ peer_id);
+ return -ENOENT;
+ }
+
+ ovpn_crypto_key_slot_delete(&peer->crypto, slot);
+ ovpn_peer_put(peer);
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_peer_del_notify - notify userspace about peer being deleted
+ * @peer: the peer being deleted
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer)
+{
+ struct ovpn_socket *sock;
+ struct sk_buff *msg;
+ struct nlattr *attr;
+ int ret = -EMSGSIZE;
+ void *hdr;
+
+ netdev_info(peer->ovpn->dev, "deleting peer with id %u, reason %d\n",
+ peer->id, peer->delete_reason);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_PEER_DEL_NTF);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_free_msg;
+ }
+
+ if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
+ goto err_cancel_msg;
+
+ attr = nla_nest_start(msg, OVPN_A_PEER);
+ if (!attr)
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_PEER_DEL_REASON, peer->delete_reason))
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_PEER_ID, peer->id))
+ goto err_cancel_msg;
+
+ nla_nest_end(msg, attr);
+
+ genlmsg_end(msg, hdr);
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
+ msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return 0;
+
+err_unlock:
+ rcu_read_unlock();
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+/**
+ * ovpn_nl_key_swap_notify - notify userspace peer's key must be renewed
+ * @peer: the peer whose key needs to be renewed
+ * @key_id: the ID of the key that needs to be renewed
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id)
+{
+ struct ovpn_socket *sock;
+ struct nlattr *k_attr;
+ struct sk_buff *msg;
+ int ret = -EMSGSIZE;
+ void *hdr;
+
+ netdev_info(peer->ovpn->dev, "peer with id %u must rekey - primary key unusable.\n",
+ peer->id);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_KEY_SWAP_NTF);
+ if (!hdr) {
+ ret = -ENOBUFS;
+ goto err_free_msg;
+ }
+
+ if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
+ goto err_cancel_msg;
+
+ k_attr = nla_nest_start(msg, OVPN_A_KEYCONF);
+ if (!k_attr)
+ goto err_cancel_msg;
+
+ if (nla_put_u32(msg, OVPN_A_KEYCONF_PEER_ID, peer->id))
+ goto err_cancel_msg;
+
+	if (nla_put_u32(msg, OVPN_A_KEYCONF_KEY_ID, key_id))
+ goto err_cancel_msg;
+
+ nla_nest_end(msg, k_attr);
+ genlmsg_end(msg, hdr);
+
+ rcu_read_lock();
+ sock = rcu_dereference(peer->sock);
+ if (!sock) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
+ msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ return 0;
+err_unlock:
+ rcu_read_unlock();
+err_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+err_free_msg:
+ nlmsg_free(msg);
+ return ret;
+}
+
+/**
+ * ovpn_nl_register - perform any needed registration in the NL subsystem
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int __init ovpn_nl_register(void)
+{
+ int ret = genl_register_family(&ovpn_nl_family);
+
+ if (ret) {
+ pr_err("ovpn: genl_register_family failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ovpn_nl_unregister - undo any module wide netlink registration
+ */
+void ovpn_nl_unregister(void)
+{
+ genl_unregister_family(&ovpn_nl_family);
+}
diff --git a/drivers/net/ovpn/netlink.h b/drivers/net/ovpn/netlink.h
new file mode 100644
index 000000000000..8615dfc3c472
--- /dev/null
+++ b/drivers/net/ovpn/netlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_NETLINK_H_
+#define _NET_OVPN_NETLINK_H_
+
+int ovpn_nl_register(void);
+void ovpn_nl_unregister(void);
+
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer);
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id);
+
+#endif /* _NET_OVPN_NETLINK_H_ */
diff --git a/drivers/net/ovpn/ovpnpriv.h b/drivers/net/ovpn/ovpnpriv.h
new file mode 100644
index 000000000000..5898f6adada7
--- /dev/null
+++ b/drivers/net/ovpn/ovpnpriv.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNSTRUCT_H_
+#define _NET_OVPN_OVPNSTRUCT_H_
+
+#include <linux/workqueue.h>
+#include <net/gro_cells.h>
+#include <uapi/linux/if_link.h>
+#include <uapi/linux/ovpn.h>
+
+/**
+ * struct ovpn_peer_collection - container of peers for MultiPeer mode
+ * @by_id: table of peers indexed by ID
+ * @by_vpn_addr4: table of peers indexed by VPN IPv4 address (items can be
+ * rehashed on the fly due to peer IP change)
+ * @by_vpn_addr6: table of peers indexed by VPN IPv6 address (items can be
+ * rehashed on the fly due to peer IP change)
+ * @by_transp_addr: table of peers indexed by transport address (items can be
+ * rehashed on the fly due to peer IP change)
+ */
+struct ovpn_peer_collection {
+ DECLARE_HASHTABLE(by_id, 12);
+ struct hlist_nulls_head by_vpn_addr4[1 << 12];
+ struct hlist_nulls_head by_vpn_addr6[1 << 12];
+ struct hlist_nulls_head by_transp_addr[1 << 12];
+};
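+
+/* With the sizes above each table has 1 << 12 = 4096 buckets; the bucket
+ * for a given key is picked via jhash (see ovpn_get_hash_slot() in
+ * peer.c).
+ */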
+
+/**
+ * struct ovpn_priv - per ovpn interface state
+ * @dev: the actual netdev representing the tunnel
+ * @mode: device operation mode (e.g. p2p, mp, ..)
+ * @lock: protect this object
+ * @peers: data structures holding multi-peer references
+ * @peer: in P2P mode, this is the only remote peer
+ * @gro_cells: pointer to the Generic Receive Offload cell
+ * @keepalive_work: struct used to schedule keepalive periodic job
+ */
+struct ovpn_priv {
+ struct net_device *dev;
+ enum ovpn_mode mode;
+ spinlock_t lock; /* protect writing to the ovpn_priv object */
+ struct ovpn_peer_collection *peers;
+ struct ovpn_peer __rcu *peer;
+ struct gro_cells gro_cells;
+ struct delayed_work keepalive_work;
+};
+
+#endif /* _NET_OVPN_OVPNSTRUCT_H_ */
diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c
new file mode 100644
index 000000000000..a1fd27b9c038
--- /dev/null
+++ b/drivers/net/ovpn/peer.c
@@ -0,0 +1,1364 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/hashtable.h>
+#include <net/ip6_route.h>
+
+#include "ovpnpriv.h"
+#include "bind.h"
+#include "pktid.h"
+#include "crypto.h"
+#include "io.h"
+#include "main.h"
+#include "netlink.h"
+#include "peer.h"
+#include "socket.h"
+
+static void unlock_ovpn(struct ovpn_priv *ovpn,
+ struct llist_head *release_list)
+ __releases(&ovpn->lock)
+{
+ struct ovpn_peer *peer;
+
+ spin_unlock_bh(&ovpn->lock);
+
+ llist_for_each_entry(peer, release_list->first, release_entry) {
+ ovpn_socket_release(peer);
+ ovpn_peer_put(peer);
+ }
+}
+
+/**
+ * ovpn_peer_keepalive_set - configure keepalive values for peer
+ * @peer: the peer to configure
+ * @interval: outgoing keepalive interval
+ * @timeout: incoming keepalive timeout
+ */
+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout)
+{
+ time64_t now = ktime_get_real_seconds();
+
+ netdev_dbg(peer->ovpn->dev,
+ "scheduling keepalive for peer %u: interval=%u timeout=%u\n",
+ peer->id, interval, timeout);
+
+ peer->keepalive_interval = interval;
+ WRITE_ONCE(peer->last_sent, now);
+ peer->keepalive_xmit_exp = now + interval;
+
+ peer->keepalive_timeout = timeout;
+ WRITE_ONCE(peer->last_recv, now);
+ peer->keepalive_recv_exp = now + timeout;
+
+ /* now that interval and timeout have been changed, kick
+ * off the worker so that the next delay can be recomputed
+ */
+ mod_delayed_work(system_wq, &peer->ovpn->keepalive_work, 0);
+}
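+
+/* As an example (hypothetical numbers): calling
+ * ovpn_peer_keepalive_set(peer, 10, 60) at now=1000 sets
+ * keepalive_xmit_exp=1010 and keepalive_recv_exp=1060, i.e. a keepalive
+ * message is emitted if nothing is sent for 10s, while the peer is
+ * declared dead after 60s of silence.
+ */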
+
+/**
+ * ovpn_peer_keepalive_send - periodic worker sending keepalive packets
+ * @work: pointer to the work member of the related peer object
+ *
+ * NOTE: the reference to peer is not dropped because it gets inherited
+ * by ovpn_xmit_special()
+ */
+static void ovpn_peer_keepalive_send(struct work_struct *work)
+{
+ struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
+ keepalive_work);
+
+ local_bh_disable();
+ ovpn_xmit_special(peer, ovpn_keepalive_message,
+ sizeof(ovpn_keepalive_message));
+ local_bh_enable();
+}
+
+/**
+ * ovpn_peer_new - allocate and initialize a new peer object
+ * @ovpn: the openvpn instance inside which the peer should be created
+ * @id: the ID assigned to this peer
+ *
+ * Return: a pointer to the new peer on success or an error code otherwise
+ */
+struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id)
+{
+ struct ovpn_peer *peer;
+ int ret;
+
+ /* alloc and init peer object */
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return ERR_PTR(-ENOMEM);
+
+ peer->id = id;
+ peer->ovpn = ovpn;
+
+ peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY);
+ peer->vpn_addrs.ipv6 = in6addr_any;
+
+ RCU_INIT_POINTER(peer->bind, NULL);
+ ovpn_crypto_state_init(&peer->crypto);
+ spin_lock_init(&peer->lock);
+ kref_init(&peer->refcount);
+ ovpn_peer_stats_init(&peer->vpn_stats);
+ ovpn_peer_stats_init(&peer->link_stats);
+ INIT_WORK(&peer->keepalive_work, ovpn_peer_keepalive_send);
+
+ ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(ovpn->dev,
+ "cannot initialize dst cache for peer %u\n",
+ peer->id);
+ kfree(peer);
+ return ERR_PTR(ret);
+ }
+
+ netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL);
+
+ return peer;
+}
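+
+/* Typical lifecycle (illustrative sketch only, error handling trimmed):
+ *
+ *   struct ovpn_peer *peer = ovpn_peer_new(ovpn, id);
+ *
+ *   if (IS_ERR(peer))
+ *           return PTR_ERR(peer);
+ *   if (ovpn_peer_add(ovpn, peer) < 0)
+ *           ovpn_peer_put(peer);
+ */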
+
+/**
+ * ovpn_peer_reset_sockaddr - recreate binding for peer
+ * @peer: peer to recreate the binding for
+ * @ss: sockaddr to use as remote endpoint for the binding
+ * @local_ip: local IP for the binding
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss,
+ const void *local_ip)
+{
+ struct ovpn_bind *bind;
+ size_t ip_len;
+
+ lockdep_assert_held(&peer->lock);
+
+ /* create new ovpn_bind object */
+ bind = ovpn_bind_from_sockaddr(ss);
+ if (IS_ERR(bind))
+ return PTR_ERR(bind);
+
+ if (local_ip) {
+ if (ss->ss_family == AF_INET) {
+ ip_len = sizeof(struct in_addr);
+ } else if (ss->ss_family == AF_INET6) {
+ ip_len = sizeof(struct in6_addr);
+ } else {
+ net_dbg_ratelimited("%s: invalid family %u for remote endpoint for peer %u\n",
+ netdev_name(peer->ovpn->dev),
+ ss->ss_family, peer->id);
+ kfree(bind);
+ return -EINVAL;
+ }
+
+ memcpy(&bind->local, local_ip, ip_len);
+ }
+
+ /* set binding */
+ ovpn_bind_reset(peer, bind);
+
+ return 0;
+}
+
+/* variable name __tbl2 needs to be different from __tbl1
+ * in the macro below to avoid confusing clang
+ */
+#define ovpn_get_hash_slot(_tbl, _key, _key_len) ({ \
+ typeof(_tbl) *__tbl2 = &(_tbl); \
+ jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl2); \
+})
+
+#define ovpn_get_hash_head(_tbl, _key, _key_len) ({ \
+ typeof(_tbl) *__tbl1 = &(_tbl); \
+ &(*__tbl1)[ovpn_get_hash_slot(*__tbl1, _key, _key_len)];\
+})
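+
+/* Usage example (sketch): the bucket for a peer ID lookup is obtained
+ * with:
+ *
+ *   head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
+ *                             sizeof(peer_id));
+ *
+ * i.e. jhash of the key modulo the number of buckets in the table.
+ */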
+
+/**
+ * ovpn_peer_endpoints_update - update remote or local endpoint for peer
+ * @peer: peer to update the remote endpoint for
+ * @skb: incoming packet to retrieve the source/destination address from
+ */
+void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+ struct hlist_nulls_head *nhead;
+ struct sockaddr_storage ss;
+ struct sockaddr_in6 *sa6;
+ bool reset_cache = false;
+ struct sockaddr_in *sa;
+ struct ovpn_bind *bind;
+ const void *local_ip = NULL;
+ size_t salen = 0;
+
+ spin_lock_bh(&peer->lock);
+ bind = rcu_dereference_protected(peer->bind,
+ lockdep_is_held(&peer->lock));
+ if (unlikely(!bind))
+ goto unlock;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ /* float check */
+ if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
+ /* unconditionally save local endpoint in case
+ * of float, as it may have changed as well
+ */
+ local_ip = &ip_hdr(skb)->daddr;
+ sa = (struct sockaddr_in *)&ss;
+ sa->sin_family = AF_INET;
+ sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sa->sin_port = udp_hdr(skb)->source;
+ salen = sizeof(*sa);
+ reset_cache = true;
+ break;
+ }
+
+ /* if no float happened, let's double check if the local endpoint
+ * has changed
+ */
+ if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) {
+ net_dbg_ratelimited("%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &bind->local.ipv4.s_addr,
+ &ip_hdr(skb)->daddr);
+ bind->local.ipv4.s_addr = ip_hdr(skb)->daddr;
+ reset_cache = true;
+ }
+ break;
+ case htons(ETH_P_IPV6):
+ /* float check */
+ if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) {
+ /* unconditionally save local endpoint in case
+ * of float, as it may have changed as well
+ */
+ local_ip = &ipv6_hdr(skb)->daddr;
+ sa6 = (struct sockaddr_in6 *)&ss;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = ipv6_hdr(skb)->saddr;
+ sa6->sin6_port = udp_hdr(skb)->source;
+ sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr,
+ skb->skb_iif);
+ salen = sizeof(*sa6);
+ reset_cache = true;
+ break;
+ }
+
+ /* if no float happened, let's double check if the local endpoint
+ * has changed
+ */
+ if (unlikely(!ipv6_addr_equal(&bind->local.ipv6,
+ &ipv6_hdr(skb)->daddr))) {
+ net_dbg_ratelimited("%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, &bind->local.ipv6,
+ &ipv6_hdr(skb)->daddr);
+ bind->local.ipv6 = ipv6_hdr(skb)->daddr;
+ reset_cache = true;
+ }
+ break;
+ default:
+ goto unlock;
+ }
+
+ if (unlikely(reset_cache))
+ dst_cache_reset(&peer->dst_cache);
+
+ /* if the peer did not float, we can bail out now */
+ if (likely(!salen))
+ goto unlock;
+
+ if (unlikely(ovpn_peer_reset_sockaddr(peer,
+ (struct sockaddr_storage *)&ss,
+ local_ip) < 0))
+ goto unlock;
+
+ net_dbg_ratelimited("%s: peer %d floated to %pIScp",
+ netdev_name(peer->ovpn->dev), peer->id, &ss);
+
+ spin_unlock_bh(&peer->lock);
+
+ /* rehashing is required only in MP mode as P2P has one peer
+ * only and thus there is no hashtable
+ */
+ if (peer->ovpn->mode == OVPN_MODE_MP) {
+ spin_lock_bh(&peer->ovpn->lock);
+ spin_lock_bh(&peer->lock);
+ bind = rcu_dereference_protected(peer->bind,
+ lockdep_is_held(&peer->lock));
+ if (unlikely(!bind)) {
+ spin_unlock_bh(&peer->lock);
+ spin_unlock_bh(&peer->ovpn->lock);
+ return;
+ }
+
+ /* This function may be invoked concurrently, therefore another
+ * float may have happened in parallel: perform rehashing
+ * using the peer->bind->remote directly as key
+ */
+
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ salen = sizeof(*sa);
+ break;
+ case AF_INET6:
+ salen = sizeof(*sa6);
+ break;
+ }
+
+ /* remove old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
+ /* re-add with new transport address */
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr,
+ &bind->remote, salen);
+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
+ spin_unlock_bh(&peer->lock);
+ spin_unlock_bh(&peer->ovpn->lock);
+ }
+ return;
+unlock:
+ spin_unlock_bh(&peer->lock);
+}
+
+/**
+ * ovpn_peer_release_rcu - RCU callback performing last peer release steps
+ * @head: RCU member of the ovpn_peer
+ */
+static void ovpn_peer_release_rcu(struct rcu_head *head)
+{
+ struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu);
+
+ /* this call will immediately free the dst_cache, therefore we
+ * perform it in the RCU callback, when all contexts are done
+ */
+ dst_cache_destroy(&peer->dst_cache);
+ kfree(peer);
+}
+
+/**
+ * ovpn_peer_release - release peer private members
+ * @peer: the peer to release
+ */
+void ovpn_peer_release(struct ovpn_peer *peer)
+{
+ ovpn_crypto_state_release(&peer->crypto);
+ spin_lock_bh(&peer->lock);
+ ovpn_bind_reset(peer, NULL);
+ spin_unlock_bh(&peer->lock);
+ call_rcu(&peer->rcu, ovpn_peer_release_rcu);
+ netdev_put(peer->ovpn->dev, &peer->dev_tracker);
+}
+
+/**
+ * ovpn_peer_release_kref - callback for kref_put
+ * @kref: the kref object belonging to the peer
+ */
+void ovpn_peer_release_kref(struct kref *kref)
+{
+ struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount);
+
+ ovpn_peer_release(peer);
+}
+
+/**
+ * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address
+ * @skb: the packet to extract data from
+ * @ss: the sockaddr to fill
+ *
+ * Return: sockaddr length on success or -1 otherwise
+ */
+static int ovpn_peer_skb_to_sockaddr(struct sk_buff *skb,
+ struct sockaddr_storage *ss)
+{
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ sa4 = (struct sockaddr_in *)ss;
+ sa4->sin_family = AF_INET;
+ sa4->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sa4->sin_port = udp_hdr(skb)->source;
+ return sizeof(*sa4);
+ case htons(ETH_P_IPV6):
+ sa6 = (struct sockaddr_in6 *)ss;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = ipv6_hdr(skb)->saddr;
+ sa6->sin6_port = udp_hdr(skb)->source;
+ return sizeof(*sa6);
+ }
+
+ return -1;
+}
+
+/**
+ * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb
+ * @skb: the outgoing packet
+ *
+ * Return: the IPv4 of the nexthop
+ */
+static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb)
+{
+ const struct rtable *rt = skb_rtable(skb);
+
+ if (rt && rt->rt_uses_gateway)
+ return rt->rt_gw4;
+
+ return ip_hdr(skb)->daddr;
+}
+
+/**
+ * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb
+ * @skb: the outgoing packet
+ *
+ * Return: the IPv6 of the nexthop
+ */
+static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb)
+{
+ const struct rt6_info *rt = skb_rt6_info(skb);
+
+ if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
+ return ipv6_hdr(skb)->daddr;
+
+ return rt->rt6i_gateway;
+}
+
+/**
+ * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address
+ * @ovpn: the openvpn instance to search
+ * @addr: VPN IPv4 to use as search key
+ *
+ * Refcounter is not increased for the returned peer.
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_priv *ovpn,
+ __be32 addr)
+{
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ struct ovpn_peer *tmp;
+ unsigned int slot;
+
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr4, &addr,
+ sizeof(addr));
+ nhead = &ovpn->peers->by_vpn_addr4[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4)
+ if (addr == tmp->vpn_addrs.ipv4.s_addr)
+ return tmp;
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (get_nulls_value(ntmp) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+/**
+ * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address
+ * @ovpn: the openvpn instance to search
+ * @addr: VPN IPv6 to use as search key
+ *
+ * Refcounter is not increased for the returned peer.
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_priv *ovpn,
+ struct in6_addr *addr)
+{
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ struct ovpn_peer *tmp;
+ unsigned int slot;
+
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr6, addr,
+ sizeof(*addr));
+ nhead = &ovpn->peers->by_vpn_addr6[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6)
+ if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6))
+ return tmp;
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (get_nulls_value(ntmp) != slot)
+ goto begin;
+
+ return NULL;
+}
+
+/**
+ * ovpn_peer_transp_match - check if sockaddr and peer binding match
+ * @peer: the peer to get the binding from
+ * @ss: the sockaddr to match
+ *
+ * Return: true if sockaddr and binding match or false otherwise
+ */
+static bool ovpn_peer_transp_match(const struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss)
+{
+ struct ovpn_bind *bind = rcu_dereference(peer->bind);
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+
+ if (unlikely(!bind))
+ return false;
+
+ if (ss->ss_family != bind->remote.in4.sin_family)
+ return false;
+
+ switch (ss->ss_family) {
+ case AF_INET:
+ sa4 = (struct sockaddr_in *)ss;
+ if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr)
+ return false;
+ if (sa4->sin_port != bind->remote.in4.sin_port)
+ return false;
+ break;
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)ss;
+ if (!ipv6_addr_equal(&sa6->sin6_addr,
+ &bind->remote.in6.sin6_addr))
+ return false;
+ if (sa6->sin6_port != bind->remote.in6.sin6_port)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address in a P2P
+ * instance
+ * @ovpn: the openvpn instance to search
+ * @ss: the transport socket address
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *
+ovpn_peer_get_by_transp_addr_p2p(struct ovpn_priv *ovpn,
+ struct sockaddr_storage *ss)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(ovpn->peer);
+ if (likely(tmp && ovpn_peer_transp_match(tmp, ss) &&
+ ovpn_peer_hold(tmp)))
+ peer = tmp;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_transp_addr - retrieve peer by transport address
+ * @ovpn: the openvpn instance to search
+ * @skb: the skb to retrieve the source transport address from
+ *
+ * Return: a pointer to the peer if found or NULL otherwise
+ */
+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
+ struct sk_buff *skb)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+ struct sockaddr_storage ss = { 0 };
+ struct hlist_nulls_head *nhead;
+ struct hlist_nulls_node *ntmp;
+ unsigned int slot;
+ ssize_t sa_len;
+
+ sa_len = ovpn_peer_skb_to_sockaddr(skb, &ss);
+ if (unlikely(sa_len < 0))
+ return NULL;
+
+ if (ovpn->mode == OVPN_MODE_P2P)
+ return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss);
+
+ rcu_read_lock();
+begin:
+ slot = ovpn_get_hash_slot(ovpn->peers->by_transp_addr, &ss, sa_len);
+ nhead = &ovpn->peers->by_transp_addr[slot];
+
+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead,
+ hash_entry_transp_addr) {
+ if (!ovpn_peer_transp_match(tmp, &ss))
+ continue;
+
+ if (!ovpn_peer_hold(tmp))
+ continue;
+
+ peer = tmp;
+ break;
+ }
+
+ /* item may have moved during lookup - check nulls and restart
+ * if that's the case
+ */
+ if (!peer && get_nulls_value(ntmp) != slot)
+ goto begin;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P instance
+ * @ovpn: the openvpn instance to search
+ * @peer_id: the ID of the peer to find
+ *
+ * Return: the peer if found or NULL otherwise
+ */
+static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_priv *ovpn,
+ u32 peer_id)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(ovpn->peer);
+ if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp)))
+ peer = tmp;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_peer_get_by_id - retrieve peer by ID
+ * @ovpn: the openvpn instance to search
+ * @peer_id: the unique peer identifier to match
+ *
+ * Return: a pointer to the peer if found or NULL otherwise
+ */
+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id)
+{
+ struct ovpn_peer *tmp, *peer = NULL;
+ struct hlist_head *head;
+
+ if (ovpn->mode == OVPN_MODE_P2P)
+ return ovpn_peer_get_by_id_p2p(ovpn, peer_id);
+
+ head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
+ sizeof(peer_id));
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, head, hash_entry_id) {
+ if (tmp->id != peer_id)
+ continue;
+
+ if (!ovpn_peer_hold(tmp))
+ continue;
+
+ peer = tmp;
+ break;
+ }
+ rcu_read_unlock();
+
+ return peer;
+}
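+
+/* The returned peer, if any, carries a reference that the caller must
+ * drop when done, e.g. (sketch):
+ *
+ *   peer = ovpn_peer_get_by_id(ovpn, peer_id);
+ *   if (peer) {
+ *           ...
+ *           ovpn_peer_put(peer);
+ *   }
+ */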
+
+static void ovpn_peer_remove(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ switch (peer->ovpn->mode) {
+ case OVPN_MODE_MP:
+ /* prevent double remove */
+ if (hlist_unhashed(&peer->hash_entry_id))
+ return;
+
+ hlist_del_init_rcu(&peer->hash_entry_id);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr);
+ break;
+ case OVPN_MODE_P2P:
+ /* prevent double remove */
+ if (peer != rcu_access_pointer(peer->ovpn->peer))
+ return;
+
+ RCU_INIT_POINTER(peer->ovpn->peer, NULL);
+ /* in P2P mode the carrier is switched off when the peer is
+ * deleted so that third party protocols can react accordingly
+ */
+ netif_carrier_off(peer->ovpn->dev);
+ break;
+ }
+
+ peer->delete_reason = reason;
+ ovpn_nl_peer_del_notify(peer);
+
+ /* append to provided list for later socket release and ref drop */
+ llist_add(&peer->release_entry, release_list);
+}
+
+/**
+ * ovpn_peer_get_by_dst - Lookup peer to send skb to
+ * @ovpn: the private data representing the current VPN session
+ * @skb: the skb to extract the destination address from
+ *
+ * This function takes a tunnel packet and looks up the peer to send it to
+ * after encapsulation. The skb is expected to be the in-tunnel packet,
+ * without any OpenVPN-related header.
+ *
+ * The IP header is assumed to be accessible in the skb data.
+ *
+ * Return: the peer if found or NULL otherwise.
+ */
+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
+ struct sk_buff *skb)
+{
+ struct ovpn_peer *peer = NULL;
+ struct in6_addr addr6;
+ __be32 addr4;
+
+ /* in P2P mode, no matter the destination, packets are always sent to
+ * the single peer listening on the other side
+ */
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ rcu_read_lock();
+ peer = rcu_dereference(ovpn->peer);
+ if (unlikely(peer && !ovpn_peer_hold(peer)))
+ peer = NULL;
+ rcu_read_unlock();
+ return peer;
+ }
+
+ rcu_read_lock();
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ addr4 = ovpn_nexthop_from_skb4(skb);
+ peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4);
+ break;
+ case htons(ETH_P_IPV6):
+ addr6 = ovpn_nexthop_from_skb6(skb);
+ peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6);
+ break;
+ }
+
+ if (unlikely(peer && !ovpn_peer_hold(peer)))
+ peer = NULL;
+ rcu_read_unlock();
+
+ return peer;
+}
+
+/**
+ * ovpn_nexthop_from_rt4 - look up the IPv4 nexthop for the given destination
+ * @ovpn: the private data representing the current VPN session
+ * @dest: the destination to be looked up
+ *
+ * Looks up in the IPv4 system routing table the IP of the nexthop to be used
+ * to reach the destination passed as argument. If no nexthop can be found, the
+ * destination itself is returned as it probably has to be used as nexthop.
+ *
+ * Return: the IP of the next hop if found or dest itself otherwise
+ */
+static __be32 ovpn_nexthop_from_rt4(struct ovpn_priv *ovpn, __be32 dest)
+{
+ struct rtable *rt;
+ struct flowi4 fl = {
+ .daddr = dest
+ };
+
+ rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL);
+ if (IS_ERR(rt)) {
+ net_dbg_ratelimited("%s: no route to host %pI4\n",
+ netdev_name(ovpn->dev), &dest);
+ /* if we end up here this packet is probably going to be
+ * thrown away later
+ */
+ return dest;
+ }
+
+ if (!rt->rt_uses_gateway)
+ goto out;
+
+ dest = rt->rt_gw4;
+out:
+ ip_rt_put(rt);
+ return dest;
+}
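+
+/* For instance (hypothetical addresses): with a route
+ * "10.8.0.0/24 via 192.168.1.1", ovpn_nexthop_from_rt4(ovpn, 10.8.0.5)
+ * returns 192.168.1.1, while for an on-link destination the destination
+ * itself is returned.
+ */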
+
+/**
+ * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination
+ * @ovpn: the private data representing the current VPN session
+ * @dest: the destination to be looked up
+ *
+ * Looks up in the IPv6 system routing table the IP of the nexthop to be used
+ * to reach the destination passed as argument. If no nexthop can be found, the
+ * destination itself is returned as it probably has to be used as nexthop.
+ *
+ * Return: the IP of the next hop if found or dest itself otherwise
+ */
+static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_priv *ovpn,
+ struct in6_addr dest)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *entry;
+ struct rt6_info *rt;
+ struct flowi6 fl = {
+ .daddr = dest,
+ };
+
+ entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl,
+ NULL);
+ if (IS_ERR(entry)) {
+ net_dbg_ratelimited("%s: no route to host %pI6c\n",
+ netdev_name(ovpn->dev), &dest);
+ /* if we end up here this packet is probably going to be
+ * thrown away later
+ */
+ return dest;
+ }
+
+ rt = dst_rt6_info(entry);
+
+ if (!(rt->rt6i_flags & RTF_GATEWAY))
+ goto out;
+
+ dest = rt->rt6i_gateway;
+out:
+ dst_release((struct dst_entry *)rt);
+#endif
+ return dest;
+}
+
+/**
+ * ovpn_peer_check_by_src - check that skb source is routed via peer
+ * @ovpn: the openvpn instance to search
+ * @skb: the packet to extract source address from
+ * @peer: the peer to check against the source address
+ *
+ * Return: true if the peer is matching or false otherwise
+ */
+bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer)
+{
+ bool match = false;
+ struct in6_addr addr6;
+ __be32 addr4;
+
+ if (ovpn->mode == OVPN_MODE_P2P) {
+ /* in P2P mode, no matter the destination, packets are always
+ * sent to the single peer listening on the other side
+ */
+ return peer == rcu_access_pointer(ovpn->peer);
+ }
+
+ /* This function performs a reverse path check, therefore we now
+ * look up the nexthop we would use if we wanted to route a packet
+ * to the source IP. If the nexthop matches the sender we know the
+ * latter is valid and we allow the packet to come in
+ */
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr);
+ rcu_read_lock();
+ match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4));
+ rcu_read_unlock();
+ break;
+ case htons(ETH_P_IPV6):
+ addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr);
+ rcu_read_lock();
+ match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6));
+ rcu_read_unlock();
+ break;
+ }
+
+ return match;
+}
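+
+/* Example (hypothetical setup): if 10.8.0.0/24 is routed via peer A and
+ * a packet with source 10.8.0.5 is received from peer B, the nexthop
+ * lookup resolves to peer A, the comparison above fails and the packet
+ * is rejected.
+ */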
+
+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer)
+{
+ struct hlist_nulls_head *nhead;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ /* rehashing makes sense only in multipeer mode */
+ if (peer->ovpn->mode != OVPN_MODE_MP)
+ return;
+
+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) {
+ /* remove potential old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4);
+
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr4,
+ &peer->vpn_addrs.ipv4,
+ sizeof(peer->vpn_addrs.ipv4));
+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead);
+ }
+
+ if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) {
+ /* remove potential old hashing */
+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6);
+
+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr6,
+ &peer->vpn_addrs.ipv6,
+ sizeof(peer->vpn_addrs.ipv6));
+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead);
+ }
+}
+
+/**
+ * ovpn_peer_add_mp - add peer to related tables in a MP instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_mp(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ struct sockaddr_storage sa = { 0 };
+ struct hlist_nulls_head *nhead;
+ struct sockaddr_in6 *sa6;
+ struct sockaddr_in *sa4;
+ struct ovpn_bind *bind;
+ struct ovpn_peer *tmp;
+ size_t salen;
+ int ret = 0;
+
+ spin_lock_bh(&ovpn->lock);
+ /* do not add duplicates */
+ tmp = ovpn_peer_get_by_id(ovpn, peer->id);
+ if (tmp) {
+ ovpn_peer_put(tmp);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ bind = rcu_dereference_protected(peer->bind, true);
+ /* peers connected via TCP have bind == NULL */
+ if (bind) {
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ sa4 = (struct sockaddr_in *)&sa;
+
+ sa4->sin_family = AF_INET;
+ sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr;
+ sa4->sin_port = bind->remote.in4.sin_port;
+ salen = sizeof(*sa4);
+ break;
+ case AF_INET6:
+ sa6 = (struct sockaddr_in6 *)&sa;
+
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = bind->remote.in6.sin6_addr;
+ sa6->sin6_port = bind->remote.in6.sin6_port;
+ salen = sizeof(*sa6);
+ break;
+ default:
+ ret = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa,
+ salen);
+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead);
+ }
+
+ hlist_add_head_rcu(&peer->hash_entry_id,
+ ovpn_get_hash_head(ovpn->peers->by_id, &peer->id,
+ sizeof(peer->id)));
+
+ ovpn_peer_hash_vpn_ip(peer);
+out:
+ spin_unlock_bh(&ovpn->lock);
+ return ret;
+}
+
+/**
+ * ovpn_peer_add_p2p - add peer to related tables in a P2P instance
+ * @ovpn: the instance to add the peer to
+ * @peer: the peer to add
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_add_p2p(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *tmp;
+
+ spin_lock_bh(&ovpn->lock);
+ /* in P2P mode only a single peer can exist at any given time,
+ * therefore the old one is released and replaced by the new one
+ */
+ tmp = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (tmp)
+ ovpn_peer_remove(tmp, OVPN_DEL_PEER_REASON_TEARDOWN,
+ &release_list);
+
+ rcu_assign_pointer(ovpn->peer, peer);
+ /* in P2P mode the carrier is switched on when the peer is added */
+ netif_carrier_on(ovpn->dev);
+ unlock_ovpn(ovpn, &release_list);
+
+ return 0;
+}
+
+/**
+ * ovpn_peer_add - add peer to the related tables
+ * @ovpn: the openvpn instance the peer belongs to
+ * @peer: the peer object to add
+ *
+ * Assume refcounter was increased by caller
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer)
+{
+ switch (ovpn->mode) {
+ case OVPN_MODE_MP:
+ return ovpn_peer_add_mp(ovpn, peer);
+ case OVPN_MODE_P2P:
+ return ovpn_peer_add_p2p(ovpn, peer);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ovpn_peer_del_mp - delete peer from related tables in a MP instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ * @release_list: list where the deleted peer should be appended
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_mp(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *tmp;
+ int ret = -ENOENT;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id);
+ if (tmp == peer) {
+ ovpn_peer_remove(peer, reason, release_list);
+ ret = 0;
+ }
+
+ if (tmp)
+ ovpn_peer_put(tmp);
+
+ return ret;
+}
+
+/**
+ * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance
+ * @peer: the peer to delete
+ * @reason: reason why the peer was deleted (sent to userspace)
+ * @release_list: list where the deleted peer should be appended
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_peer_del_p2p(struct ovpn_peer *peer,
+ enum ovpn_del_peer_reason reason,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *tmp;
+
+ lockdep_assert_held(&peer->ovpn->lock);
+
+ tmp = rcu_dereference_protected(peer->ovpn->peer,
+ lockdep_is_held(&peer->ovpn->lock));
+ if (tmp != peer)
+ return -ENOENT;
+
+ ovpn_peer_remove(peer, reason, release_list);
+
+ return 0;
+}
+
+/**
+ * ovpn_peer_del - delete peer from related tables
+ * @peer: the peer object to delete
+ * @reason: reason for deleting peer (will be sent to userspace)
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason)
+{
+ LLIST_HEAD(release_list);
+ int ret = -EOPNOTSUPP;
+
+ spin_lock_bh(&peer->ovpn->lock);
+ switch (peer->ovpn->mode) {
+ case OVPN_MODE_MP:
+ ret = ovpn_peer_del_mp(peer, reason, &release_list);
+ break;
+ case OVPN_MODE_P2P:
+ ret = ovpn_peer_del_p2p(peer, reason, &release_list);
+ break;
+ default:
+ break;
+ }
+ unlock_ovpn(peer->ovpn, &release_list);
+
+ return ret;
+}
+
+/**
+ * ovpn_peer_release_p2p - release peer upon P2P device teardown
+ * @ovpn: the instance being torn down
+ * @sk: if not NULL, release peer only if it's using this specific socket
+ * @reason: the reason for releasing the peer
+ */
+static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ struct ovpn_socket *ovpn_sock;
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *peer;
+
+ spin_lock_bh(&ovpn->lock);
+ peer = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (!peer) {
+ spin_unlock_bh(&ovpn->lock);
+ return;
+ }
+
+ if (sk) {
+ ovpn_sock = rcu_access_pointer(peer->sock);
+ if (!ovpn_sock || ovpn_sock->sock->sk != sk) {
+ spin_unlock_bh(&ovpn->lock);
+ ovpn_peer_put(peer);
+ return;
+ }
+ }
+
+ ovpn_peer_remove(peer, reason, &release_list);
+ unlock_ovpn(ovpn, &release_list);
+}
+
+static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ struct ovpn_socket *ovpn_sock;
+ LLIST_HEAD(release_list);
+ struct ovpn_peer *peer;
+ struct hlist_node *tmp;
+ int bkt;
+
+ spin_lock_bh(&ovpn->lock);
+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
+ bool remove = true;
+
+ /* if a socket was passed as argument, skip all peers except
+ * those using it
+ */
+ if (sk) {
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference(peer->sock);
+ remove = ovpn_sock && ovpn_sock->sock->sk == sk;
+ rcu_read_unlock();
+ }
+
+ if (remove)
+ ovpn_peer_remove(peer, reason, &release_list);
+ }
+ unlock_ovpn(ovpn, &release_list);
+}
+
+/**
+ * ovpn_peers_free - free all peers in the instance
+ * @ovpn: the instance whose peers should be released
+ * @sk: if not NULL, only peers using this socket are removed and the socket
+ * is released immediately
+ * @reason: the reason for releasing all peers
+ */
+void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason)
+{
+ switch (ovpn->mode) {
+ case OVPN_MODE_P2P:
+ ovpn_peer_release_p2p(ovpn, sk, reason);
+ break;
+ case OVPN_MODE_MP:
+ ovpn_peers_release_mp(ovpn, sk, reason);
+ break;
+ }
+}
+
+static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ time64_t last_recv, last_sent, next_run1, next_run2;
+ unsigned long timeout, interval;
+ bool expired;
+
+ spin_lock_bh(&peer->lock);
+ /* we expect both timers to be configured at the same time,
+ * therefore bail out if either is not set
+ */
+ if (!peer->keepalive_timeout || !peer->keepalive_interval) {
+ spin_unlock_bh(&peer->lock);
+ return 0;
+ }
+
+ /* check for peer timeout */
+ expired = false;
+ timeout = peer->keepalive_timeout;
+ last_recv = READ_ONCE(peer->last_recv);
+ if (now < last_recv + timeout) {
+ peer->keepalive_recv_exp = last_recv + timeout;
+ next_run1 = peer->keepalive_recv_exp;
+ } else if (peer->keepalive_recv_exp > now) {
+ next_run1 = peer->keepalive_recv_exp;
+ } else {
+ expired = true;
+ }
+
+ if (expired) {
+ /* peer is dead -> kill it and move on */
+ spin_unlock_bh(&peer->lock);
+ netdev_dbg(peer->ovpn->dev, "peer %u expired\n",
+ peer->id);
+ ovpn_peer_remove(peer, OVPN_DEL_PEER_REASON_EXPIRED,
+ release_list);
+ return 0;
+ }
+
+ /* check for peer keepalive */
+ expired = false;
+ interval = peer->keepalive_interval;
+ last_sent = READ_ONCE(peer->last_sent);
+ if (now < last_sent + interval) {
+ peer->keepalive_xmit_exp = last_sent + interval;
+ next_run2 = peer->keepalive_xmit_exp;
+ } else if (peer->keepalive_xmit_exp > now) {
+ next_run2 = peer->keepalive_xmit_exp;
+ } else {
+ expired = true;
+ next_run2 = now + interval;
+ }
+ spin_unlock_bh(&peer->lock);
+
+ if (expired) {
+ /* a keepalive packet is required */
+ netdev_dbg(peer->ovpn->dev,
+ "sending keepalive to peer %u\n",
+ peer->id);
+ if (schedule_work(&peer->keepalive_work))
+ ovpn_peer_hold(peer);
+ }
+
+ if (next_run1 < next_run2)
+ return next_run1;
+
+ return next_run2;
+}
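+
+/* Worked example (hypothetical numbers): with now=1000, last_recv=990,
+ * timeout=60, last_sent=995 and interval=10, neither timer expired:
+ * next_run1=1050 and next_run2=1005, hence 1005 is returned and the
+ * caller rearms the worker to fire 5 seconds from now.
+ */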
+
+static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_priv *ovpn,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ time64_t tmp_next_run, next_run = 0;
+ struct hlist_node *tmp;
+ struct ovpn_peer *peer;
+ int bkt;
+
+ lockdep_assert_held(&ovpn->lock);
+
+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) {
+ tmp_next_run = ovpn_peer_keepalive_work_single(peer, now,
+ release_list);
+ if (!tmp_next_run)
+ continue;
+
+ /* the next worker run will be scheduled based on the shortest
+ * required interval across all peers
+ */
+ if (!next_run || tmp_next_run < next_run)
+ next_run = tmp_next_run;
+ }
+
+ return next_run;
+}
+
+static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_priv *ovpn,
+ time64_t now,
+ struct llist_head *release_list)
+{
+ struct ovpn_peer *peer;
+ time64_t next_run = 0;
+
+ lockdep_assert_held(&ovpn->lock);
+
+ peer = rcu_dereference_protected(ovpn->peer,
+ lockdep_is_held(&ovpn->lock));
+ if (peer)
+ next_run = ovpn_peer_keepalive_work_single(peer, now,
+ release_list);
+
+ return next_run;
+}
+
+/**
+ * ovpn_peer_keepalive_work - run keepalive logic on each known peer
+ * @work: pointer to the work member of the related ovpn object
+ *
+ * Each peer has two timers (if configured):
+ * 1. peer timeout: when no data is received for a certain interval,
+ * the peer is considered dead and it gets killed.
+ * 2. peer keepalive: when no data is sent to a certain peer for a
+ * certain interval, a special 'keepalive' packet is explicitly sent.
+ *
+ * This function iterates across the whole peer collection while
+ * checking the timers described above.
+ */
+void ovpn_peer_keepalive_work(struct work_struct *work)
+{
+ struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv,
+ keepalive_work.work);
+ time64_t next_run = 0, now = ktime_get_real_seconds();
+ LLIST_HEAD(release_list);
+
+ spin_lock_bh(&ovpn->lock);
+ switch (ovpn->mode) {
+ case OVPN_MODE_MP:
+ next_run = ovpn_peer_keepalive_work_mp(ovpn, now,
+ &release_list);
+ break;
+ case OVPN_MODE_P2P:
+ next_run = ovpn_peer_keepalive_work_p2p(ovpn, now,
+ &release_list);
+ break;
+ }
+
+ /* prevent rearming if the interface is being destroyed */
+ if (next_run > 0) {
+ netdev_dbg(ovpn->dev,
+ "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n",
+ now, next_run, next_run - now);
+ schedule_delayed_work(&ovpn->keepalive_work,
+ (next_run - now) * HZ);
+ }
+ unlock_ovpn(ovpn, &release_list);
+}
diff --git a/drivers/net/ovpn/peer.h b/drivers/net/ovpn/peer.h
new file mode 100644
index 000000000000..a1423f2b09e0
--- /dev/null
+++ b/drivers/net/ovpn/peer.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPEER_H_
+#define _NET_OVPN_OVPNPEER_H_
+
+#include <net/dst_cache.h>
+#include <net/strparser.h>
+
+#include "crypto.h"
+#include "socket.h"
+#include "stats.h"
+
+/**
+ * struct ovpn_peer - the main remote peer object
+ * @ovpn: main openvpn instance this peer belongs to
+ * @dev_tracker: reference tracker for associated dev
+ * @id: unique identifier
+ * @vpn_addrs: IP addresses assigned over the tunnel
+ * @vpn_addrs.ipv4: IPv4 assigned to peer on the tunnel
+ * @vpn_addrs.ipv6: IPv6 assigned to peer on the tunnel
+ * @hash_entry_id: entry in the peer ID hashtable
+ * @hash_entry_addr4: entry in the peer IPv4 hashtable
+ * @hash_entry_addr6: entry in the peer IPv6 hashtable
+ * @hash_entry_transp_addr: entry in the peer transport address hashtable
+ * @sock: the socket being used to talk to this peer
+ * @tcp: keeps track of TCP specific state
+ * @tcp.strp: stream parser context (TCP only)
+ * @tcp.user_queue: received packets that have to go to userspace (TCP only)
+ * @tcp.out_queue: packets on hold while socket is taken by user (TCP only)
+ * @tcp.tx_in_progress: true if TX is already ongoing (TCP only)
+ * @tcp.out_msg.skb: packet scheduled for sending (TCP only)
+ * @tcp.out_msg.offset: offset where next send should start (TCP only)
+ * @tcp.out_msg.len: remaining data to send within packet (TCP only)
+ * @tcp.sk_cb.sk_data_ready: pointer to original cb (TCP only)
+ * @tcp.sk_cb.sk_write_space: pointer to original cb (TCP only)
+ * @tcp.sk_cb.prot: pointer to original prot object (TCP only)
+ * @tcp.sk_cb.ops: pointer to the original prot_ops object (TCP only)
+ * @crypto: the crypto configuration (ciphers, keys, etc..)
+ * @dst_cache: cache for dst_entry used to send to peer
+ * @bind: remote peer binding
+ * @keepalive_interval: seconds after which a new keepalive should be sent
+ * @keepalive_xmit_exp: future timestamp when next keepalive should be sent
+ * @last_sent: timestamp of the last successfully sent packet
+ * @keepalive_timeout: seconds after which an inactive peer is considered dead
+ * @keepalive_recv_exp: future timestamp when the peer should expire
+ * @last_recv: timestamp of the last authenticated received packet
+ * @vpn_stats: per-peer in-VPN TX/RX stats
+ * @link_stats: per-peer link/transport TX/RX stats
+ * @delete_reason: why peer was deleted (i.e. timeout, transport error, ..)
+ * @lock: protects binding to peer (bind) and keepalive* fields
+ * @refcount: reference counter
+ * @rcu: used to free peer in an RCU safe way
+ * @release_entry: entry for the socket release list
+ * @keepalive_work: used to schedule keepalive sending
+ */
+struct ovpn_peer {
+ struct ovpn_priv *ovpn;
+ netdevice_tracker dev_tracker;
+ u32 id;
+ struct {
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ } vpn_addrs;
+ struct hlist_node hash_entry_id;
+ struct hlist_nulls_node hash_entry_addr4;
+ struct hlist_nulls_node hash_entry_addr6;
+ struct hlist_nulls_node hash_entry_transp_addr;
+ struct ovpn_socket __rcu *sock;
+
+ struct {
+ struct strparser strp;
+ struct sk_buff_head user_queue;
+ struct sk_buff_head out_queue;
+ bool tx_in_progress;
+
+ struct {
+ struct sk_buff *skb;
+ int offset;
+ int len;
+ } out_msg;
+
+ struct {
+ void (*sk_data_ready)(struct sock *sk);
+ void (*sk_write_space)(struct sock *sk);
+ struct proto *prot;
+ const struct proto_ops *ops;
+ } sk_cb;
+
+ struct work_struct defer_del_work;
+ } tcp;
+ struct ovpn_crypto_state crypto;
+ struct dst_cache dst_cache;
+ struct ovpn_bind __rcu *bind;
+ unsigned long keepalive_interval;
+ unsigned long keepalive_xmit_exp;
+ time64_t last_sent;
+ unsigned long keepalive_timeout;
+ unsigned long keepalive_recv_exp;
+ time64_t last_recv;
+ struct ovpn_peer_stats vpn_stats;
+ struct ovpn_peer_stats link_stats;
+ enum ovpn_del_peer_reason delete_reason;
+ spinlock_t lock; /* protects bind and keepalive* */
+ struct kref refcount;
+ struct rcu_head rcu;
+ struct llist_node release_entry;
+ struct work_struct keepalive_work;
+};
+
+/**
+ * ovpn_peer_hold - increase reference counter
+ * @peer: the peer whose counter should be increased
+ *
+ * Return: true if the counter was increased or false if it was zero already
+ */
+static inline bool ovpn_peer_hold(struct ovpn_peer *peer)
+{
+ return kref_get_unless_zero(&peer->refcount);
+}
+
+void ovpn_peer_release(struct ovpn_peer *peer);
+void ovpn_peer_release_kref(struct kref *kref);
+
+/**
+ * ovpn_peer_put - decrease reference counter
+ * @peer: the peer whose counter should be decreased
+ */
+static inline void ovpn_peer_put(struct ovpn_peer *peer)
+{
+ kref_put(&peer->refcount, ovpn_peer_release_kref);
+}
+
+struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id);
+int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer);
+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason);
+void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk,
+ enum ovpn_del_peer_reason reason);
+
+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn,
+ struct sk_buff *skb);
+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id);
+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn,
+ struct sk_buff *skb);
+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer);
+bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb,
+ struct ovpn_peer *peer);
+
+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout);
+void ovpn_peer_keepalive_work(struct work_struct *work);
+
+void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb);
+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer,
+ const struct sockaddr_storage *ss,
+ const void *local_ip);
+
+#endif /* _NET_OVPN_OVPNPEER_H_ */
diff --git a/drivers/net/ovpn/pktid.c b/drivers/net/ovpn/pktid.c
new file mode 100644
index 000000000000..2f29049897e3
--- /dev/null
+++ b/drivers/net/ovpn/pktid.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "pktid.h"
+
+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid)
+{
+ atomic_set(&pid->seq_num, 1);
+}
+
+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr)
+{
+ memset(pr, 0, sizeof(*pr));
+ spin_lock_init(&pr->lock);
+}
+
+/* Packet replay detection.
+ * Allows ID backtrack of up to REPLAY_WINDOW_SIZE - 1.
+ */
+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time)
+{
+ const unsigned long now = jiffies;
+ int ret;
+
+ /* ID must not be zero */
+ if (unlikely(pkt_id == 0))
+ return -EINVAL;
+
+ spin_lock_bh(&pr->lock);
+
+ /* expire backtracks at or below pr->id after PKTID_RECV_EXPIRE time */
+ if (unlikely(time_after_eq(now, pr->expire)))
+ pr->id_floor = pr->id;
+
+ /* time changed? */
+ if (unlikely(pkt_time != pr->time)) {
+ if (pkt_time > pr->time) {
+ /* time moved forward, accept */
+ pr->base = 0;
+ pr->extent = 0;
+ pr->id = 0;
+ pr->time = pkt_time;
+ pr->id_floor = 0;
+ } else {
+ /* time moved backward, reject */
+ ret = -ETIME;
+ goto out;
+ }
+ }
+
+ if (likely(pkt_id == pr->id + 1)) {
+ /* well-formed ID sequence (incremented by 1) */
+ pr->base = REPLAY_INDEX(pr->base, -1);
+ pr->history[pr->base / 8] |= (1 << (pr->base % 8));
+ if (pr->extent < REPLAY_WINDOW_SIZE)
+ ++pr->extent;
+ pr->id = pkt_id;
+ } else if (pkt_id > pr->id) {
+ /* ID jumped forward by more than one */
+ const unsigned int delta = pkt_id - pr->id;
+
+ if (delta < REPLAY_WINDOW_SIZE) {
+ unsigned int i;
+
+ pr->base = REPLAY_INDEX(pr->base, -delta);
+ pr->history[pr->base / 8] |= (1 << (pr->base % 8));
+ pr->extent += delta;
+ if (pr->extent > REPLAY_WINDOW_SIZE)
+ pr->extent = REPLAY_WINDOW_SIZE;
+ for (i = 1; i < delta; ++i) {
+ unsigned int newb = REPLAY_INDEX(pr->base, i);
+
+ pr->history[newb / 8] &= ~BIT(newb % 8);
+ }
+ } else {
+ pr->base = 0;
+ pr->extent = REPLAY_WINDOW_SIZE;
+ memset(pr->history, 0, sizeof(pr->history));
+ pr->history[0] = 1;
+ }
+ pr->id = pkt_id;
+ } else {
+ /* ID backtrack */
+ const unsigned int delta = pr->id - pkt_id;
+
+ if (delta > pr->max_backtrack)
+ pr->max_backtrack = delta;
+ if (delta < pr->extent) {
+ if (pkt_id > pr->id_floor) {
+ const unsigned int ri = REPLAY_INDEX(pr->base,
+ delta);
+ u8 *p = &pr->history[ri / 8];
+ const u8 mask = (1 << (ri % 8));
+
+ if (*p & mask) {
+ ret = -EINVAL;
+ goto out;
+ }
+ *p |= mask;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ pr->expire = now + PKTID_RECV_EXPIRE;
+ ret = 0;
+out:
+ spin_unlock_bh(&pr->lock);
+ return ret;
+}
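+
+/* Worked example: within the same pkt_time epoch (and with no expiry in
+ * between) the incoming ID sequence 1, 2, 5, 3, 3 behaves as follows:
+ * 1 and 2 extend the window by one bit each, 5 jumps forward and clears
+ * the bits of the skipped IDs 3 and 4, the first 3 is an acceptable
+ * in-window backtrack, while the second 3 finds its history bit already
+ * set and is rejected with -EINVAL.
+ */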
diff --git a/drivers/net/ovpn/pktid.h b/drivers/net/ovpn/pktid.h
new file mode 100644
index 000000000000..0262d026d15e
--- /dev/null
+++ b/drivers/net/ovpn/pktid.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNPKTID_H_
+#define _NET_OVPN_OVPNPKTID_H_
+
+#include "proto.h"
+
+/* If no packets received for this length of time, set a backtrack floor
+ * at highest received packet ID thus far.
+ */
+#define PKTID_RECV_EXPIRE (30 * HZ)
+
+/* Packet-ID state for transmitter */
+struct ovpn_pktid_xmit {
+ atomic_t seq_num;
+};
+
+/* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */
+#define REPLAY_WINDOW_ORDER 8
+
+#define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER)
+#define REPLAY_WINDOW_SIZE (REPLAY_WINDOW_BYTES * 8)
+#define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1))
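+
+/* with REPLAY_WINDOW_ORDER 8 the window is a 256 byte bitmap covering
+ * the 2048 most recent packet IDs
+ */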
+
+/* Packet-ID state for receiver.
+ * Other than lock member, can be zeroed to initialize.
+ */
+struct ovpn_pktid_recv {
+ /* "sliding window" bitmask of recent packet IDs received */
+ u8 history[REPLAY_WINDOW_BYTES];
+ /* bit position of deque base in history */
+ unsigned int base;
+ /* extent (in bits) of deque in history */
+ unsigned int extent;
+ /* expiration of history in jiffies */
+ unsigned long expire;
+ /* highest sequence number received */
+ u32 id;
+ /* highest time stamp received */
+ u32 time;
+ /* we will only accept backtrack IDs > id_floor */
+ u32 id_floor;
+ unsigned int max_backtrack;
+ /* protects entire packet ID state */
+ spinlock_t lock;
+};
+
+/* Get the next packet ID for xmit */
+static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid)
+{
+ const u32 seq_num = atomic_fetch_add_unless(&pid->seq_num, 1, 0);
+ /* when the 32 bit ID space is exhausted, we return an error because the packet
+ * ID is used to create the cipher IV and we do not want to reuse the
+ * same value more than once
+ */
+ if (unlikely(!seq_num))
+ return -ERANGE;
+
+ *pktid = seq_num;
+
+ return 0;
+}
+
+/* Write 12-byte AEAD IV to dest */
+static inline void ovpn_pktid_aead_write(const u32 pktid,
+ const u8 nt[],
+ unsigned char *dest)
+{
+ *(__force __be32 *)(dest) = htonl(pktid);
+ BUILD_BUG_ON(4 + OVPN_NONCE_TAIL_SIZE != OVPN_NONCE_SIZE);
+ memcpy(dest + 4, nt, OVPN_NONCE_TAIL_SIZE);
+}
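+
+/* Sketch of the expected TX-side usage (illustrative only; pid and
+ * nonce_tail are hypothetical names):
+ *
+ *   u32 pktid;
+ *
+ *   if (ovpn_pktid_xmit_next(&pid, &pktid) < 0)
+ *           return -ERANGE;
+ *   ovpn_pktid_aead_write(pktid, nonce_tail, iv);
+ *
+ * producing the 12-byte IV: 4-byte packet ID followed by the 8-byte
+ * nonce tail.
+ */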
+
+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid);
+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr);
+
+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time);
+
+#endif /* _NET_OVPN_OVPNPKTID_H_ */
diff --git a/drivers/net/ovpn/proto.h b/drivers/net/ovpn/proto.h
new file mode 100644
index 000000000000..b7d285b4d9c1
--- /dev/null
+++ b/drivers/net/ovpn/proto.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_PROTO_H_
+#define _NET_OVPN_PROTO_H_
+
+#include "main.h"
+
+#include <linux/bitfield.h>
+#include <linux/skbuff.h>
+
+/* When the OpenVPN protocol is run in AEAD mode, use
+ * the OpenVPN packet ID as the AEAD nonce:
+ *
+ * 00000005 521c3b01 4308c041
+ * [seq # ] [ nonce_tail ]
+ * [ 12-byte full IV ] -> OVPN_NONCE_SIZE
+ * [4-bytes -> OVPN_NONCE_WIRE_SIZE
+ * on wire]
+ */
+
+/* nonce size (96bits) as required by AEAD ciphers */
+#define OVPN_NONCE_SIZE 12
+/* last 8 bytes of AEAD nonce: provided by userspace and usually derived
+ * from key material generated during TLS handshake
+ */
+#define OVPN_NONCE_TAIL_SIZE 8
+
+/* OpenVPN nonce size reduced by 8-byte nonce tail -- this is the
+ * size of the AEAD Associated Data (AD) sent over the wire
+ * and is normally the head of the IV
+ */
+#define OVPN_NONCE_WIRE_SIZE (OVPN_NONCE_SIZE - OVPN_NONCE_TAIL_SIZE)
+
+#define OVPN_OPCODE_SIZE 4 /* DATA_V2 opcode size */
+#define OVPN_OPCODE_KEYID_MASK 0x07000000
+#define OVPN_OPCODE_PKTTYPE_MASK 0xF8000000
+#define OVPN_OPCODE_PEERID_MASK 0x00FFFFFF
+
+/* packet opcodes of interest to us */
+#define OVPN_DATA_V1 6 /* data channel v1 packet */
+#define OVPN_DATA_V2 9 /* data channel v2 packet */
+
+#define OVPN_PEER_ID_UNDEF 0x00FFFFFF
+
+/**
+ * ovpn_opcode_from_skb - extract OP code from skb at specified offset
+ * @skb: the packet to extract the OP code from
+ * @offset: the offset in the data buffer where the OP code is located
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the OP code
+ */
+static inline u8 ovpn_opcode_from_skb(const struct sk_buff *skb, u16 offset)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset));
+
+ return FIELD_GET(OVPN_OPCODE_PKTTYPE_MASK, opcode);
+}
+
+/**
+ * ovpn_peer_id_from_skb - extract peer ID from skb at specified offset
+ * @skb: the packet to extract the peer ID from
+ * @offset: the offset in the data buffer where the opcode word is located
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the peer ID
+ */
+static inline u32 ovpn_peer_id_from_skb(const struct sk_buff *skb, u16 offset)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset));
+
+ return FIELD_GET(OVPN_OPCODE_PEERID_MASK, opcode);
+}
+
+/**
+ * ovpn_key_id_from_skb - extract key ID from the skb head
+ * @skb: the packet to extract the key ID from
+ *
+ * Note: this function assumes that the skb head was pulled enough
+ * to access the first 4 bytes.
+ *
+ * Return: the key ID
+ */
+static inline u8 ovpn_key_id_from_skb(const struct sk_buff *skb)
+{
+ u32 opcode = be32_to_cpu(*(__be32 *)skb->data);
+
+ return FIELD_GET(OVPN_OPCODE_KEYID_MASK, opcode);
+}
+
+/**
+ * ovpn_opcode_compose - combine OP code, key ID and peer ID to wire format
+ * @opcode: the OP code
+ * @key_id: the key ID
+ * @peer_id: the peer ID
+ *
+ * Return: a 4 bytes integer obtained combining all input values following the
+ * OpenVPN wire format. This integer can then be written to the packet header.
+ */
+static inline u32 ovpn_opcode_compose(u8 opcode, u8 key_id, u32 peer_id)
+{
+ return FIELD_PREP(OVPN_OPCODE_PKTTYPE_MASK, opcode) |
+ FIELD_PREP(OVPN_OPCODE_KEYID_MASK, key_id) |
+ FIELD_PREP(OVPN_OPCODE_PEERID_MASK, peer_id);
+}
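+
+/* For example, ovpn_opcode_compose(OVPN_DATA_V2, 1, 0x12) yields
+ * 0x49000012: opcode 9 in the top 5 bits, key ID 1 in the next 3 bits
+ * and peer ID 0x12 in the low 24 bits.
+ */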
+
+#endif /* _NET_OVPN_PROTO_H_ */
diff --git a/drivers/net/ovpn/skb.h b/drivers/net/ovpn/skb.h
new file mode 100644
index 000000000000..64430880f1da
--- /dev/null
+++ b/drivers/net/ovpn/skb.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ * James Yonan <james@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_SKB_H_
+#define _NET_OVPN_SKB_H_
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct ovpn_cb {
+ struct ovpn_peer *peer;
+ struct ovpn_crypto_key_slot *ks;
+ struct aead_request *req;
+ struct scatterlist *sg;
+ u8 *iv;
+ unsigned int payload_offset;
+ bool nosignal;
+};
+
+static inline struct ovpn_cb *ovpn_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ovpn_cb) > sizeof(skb->cb));
+ return (struct ovpn_cb *)skb->cb;
+}
+
+/* Return IP protocol version from skb header.
+ * Return 0 if protocol is not IPv4/IPv6 or cannot be read.
+ */
+static inline __be16 ovpn_ip_check_protocol(struct sk_buff *skb)
+{
+ __be16 proto = 0;
+
+ /* skb could be non-linear,
+ * make sure IP header is in non-fragmented part
+ */
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+
+ if (ip_hdr(skb)->version == 4) {
+ proto = htons(ETH_P_IP);
+ } else if (ip_hdr(skb)->version == 6) {
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+ proto = htons(ETH_P_IPV6);
+ }
+
+ return proto;
+}
+
+#endif /* _NET_OVPN_SKB_H_ */
diff --git a/drivers/net/ovpn/socket.c b/drivers/net/ovpn/socket.c
new file mode 100644
index 000000000000..a83cbab72591
--- /dev/null
+++ b/drivers/net/ovpn/socket.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "peer.h"
+#include "socket.h"
+#include "tcp.h"
+#include "udp.h"
+
+static void ovpn_socket_release_kref(struct kref *kref)
+{
+ struct ovpn_socket *sock = container_of(kref, struct ovpn_socket,
+ refcount);
+
+ if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
+ ovpn_udp_socket_detach(sock);
+ else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
+ ovpn_tcp_socket_detach(sock);
+}
+
+/**
+ * ovpn_socket_put - decrease reference counter
+ * @peer: peer whose socket reference counter should be decreased
+ * @sock: the RCU protected peer socket
+ *
+ * This function is only used internally. Callers wishing to release
+ * their reference to the ovpn_socket should use ovpn_socket_release()
+ *
+ * Return: true if the socket was released, false otherwise
+ */
+static bool ovpn_socket_put(struct ovpn_peer *peer, struct ovpn_socket *sock)
+{
+ return kref_put(&sock->refcount, ovpn_socket_release_kref);
+}
+
+/**
+ * ovpn_socket_release - release resources owned by socket user
+ * @peer: peer whose socket should be released
+ *
+ * This function should be invoked when the peer is being removed
+ * and wants to drop its link to the socket.
+ *
+ * In case of UDP, the detach routine will drop a reference to the
+ * ovpn netdev, pointed by the ovpn_socket.
+ *
+ * In case of TCP, releasing the socket will drop the reference
+ * to the peer it is linked to, thus allowing the peer to
+ * disappear as well.
+ *
+ * This function is expected to be invoked exactly once per peer
+ *
+ * NOTE: this function may sleep
+ */
+void ovpn_socket_release(struct ovpn_peer *peer)
+{
+ struct ovpn_socket *sock;
+ bool released;
+
+ might_sleep();
+
+ sock = rcu_replace_pointer(peer->sock, NULL, true);
+ /* release may be invoked after socket was detached */
+ if (!sock)
+ return;
+
+ /* sanity check: we should not end up here if the socket
+ * was already closed
+ */
+ if (!sock->sock->sk) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return;
+ }
+
+ /* Drop the reference while holding the sock lock to prevent a
+ * concurrent ovpn_socket_new() call from messing with a partially
+ * detached socket.
+ *
+ * Holding the lock ensures that a socket with refcnt 0 is fully
+ * detached before it can be picked by a concurrent reader.
+ */
+ lock_sock(sock->sock->sk);
+ released = ovpn_socket_put(peer, sock);
+ release_sock(sock->sock->sk);
+
+ /* align all readers with sk_user_data being NULL */
+ synchronize_rcu();
+
+ /* following cleanup should happen with lock released */
+ if (released) {
+ if (sock->sock->sk->sk_protocol == IPPROTO_UDP) {
+ netdev_put(sock->ovpn->dev, &sock->dev_tracker);
+ } else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) {
+ /* wait for TCP jobs to terminate */
+ ovpn_tcp_socket_wait_finish(sock);
+ ovpn_peer_put(sock->peer);
+ }
+ /* we can call plain kfree() because we already waited one RCU
+ * period due to synchronize_rcu()
+ */
+ kfree(sock);
+ }
+}
+
+static bool ovpn_socket_hold(struct ovpn_socket *sock)
+{
+ return kref_get_unless_zero(&sock->refcount);
+}
+
+static int ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer)
+{
+ if (sock->sock->sk->sk_protocol == IPPROTO_UDP)
+ return ovpn_udp_socket_attach(sock, peer->ovpn);
+ else if (sock->sock->sk->sk_protocol == IPPROTO_TCP)
+ return ovpn_tcp_socket_attach(sock, peer);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ovpn_socket_new - create a new socket and initialize it
+ * @sock: the kernel socket to embed
+ * @peer: the peer reachable via this socket
+ *
+ * Return: an ovpn socket on success or an ERR_PTR encoding a negative
+ * error code otherwise
+ */
+struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer)
+{
+ struct ovpn_socket *ovpn_sock;
+ int ret;
+
+ lock_sock(sock->sk);
+
+ /* a TCP socket can only be owned by a single peer, therefore there
+ * can't be any other user
+ */
+ if (sock->sk->sk_protocol == IPPROTO_TCP && sock->sk->sk_user_data) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ goto sock_release;
+ }
+
+ /* a UDP socket can be shared across multiple peers, but we must make
+ * sure it is not owned by something else
+ */
+ if (sock->sk->sk_protocol == IPPROTO_UDP) {
+ u8 type = READ_ONCE(udp_sk(sock->sk)->encap_type);
+
+ /* socket owned by other encapsulation module */
+ if (type && type != UDP_ENCAP_OVPNINUDP) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ goto sock_release;
+ }
+
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
+ if (ovpn_sock) {
+ /* socket owned by another ovpn instance, we can't use it */
+ if (ovpn_sock->ovpn != peer->ovpn) {
+ ovpn_sock = ERR_PTR(-EBUSY);
+ rcu_read_unlock();
+ goto sock_release;
+ }
+
+ /* this socket is already owned by this instance,
+ * therefore we can increase the refcounter and
+ * use it as expected
+ */
+ if (WARN_ON(!ovpn_socket_hold(ovpn_sock))) {
+ /* this should never happen because setting
+ * the refcnt to 0 and detaching the socket
+ * is expected to be atomic
+ */
+ ovpn_sock = ERR_PTR(-EAGAIN);
+ rcu_read_unlock();
+ goto sock_release;
+ }
+
+ rcu_read_unlock();
+ goto sock_release;
+ }
+ rcu_read_unlock();
+ }
+
+ /* socket is not owned: attach to this ovpn instance */
+
+ ovpn_sock = kzalloc(sizeof(*ovpn_sock), GFP_KERNEL);
+ if (!ovpn_sock) {
+ ovpn_sock = ERR_PTR(-ENOMEM);
+ goto sock_release;
+ }
+
+ ovpn_sock->sock = sock;
+ kref_init(&ovpn_sock->refcount);
+
+ ret = ovpn_socket_attach(ovpn_sock, peer);
+ if (ret < 0) {
+ kfree(ovpn_sock);
+ ovpn_sock = ERR_PTR(ret);
+ goto sock_release;
+ }
+
+ /* TCP sockets are per-peer, therefore they are linked to their unique
+ * peer
+ */
+ if (sock->sk->sk_protocol == IPPROTO_TCP) {
+ INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work);
+ ovpn_sock->peer = peer;
+ ovpn_peer_hold(peer);
+ } else if (sock->sk->sk_protocol == IPPROTO_UDP) {
+ /* in UDP we only link the ovpn instance since the socket is
+ * shared among multiple peers
+ */
+ ovpn_sock->ovpn = peer->ovpn;
+ netdev_hold(peer->ovpn->dev, &ovpn_sock->dev_tracker,
+ GFP_KERNEL);
+ }
+
+ rcu_assign_sk_user_data(sock->sk, ovpn_sock);
+sock_release:
+ release_sock(sock->sk);
+ return ovpn_sock;
+}
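+
+/* Caller sketch (illustrative, not part of this file): users of
+ * ovpn_socket_new() check for ERR_PTR and then publish the socket via
+ * the RCU protected peer->sock pointer, e.g.:
+ *
+ * ovpn_sock = ovpn_socket_new(sock, peer);
+ * if (IS_ERR(ovpn_sock))
+ * return PTR_ERR(ovpn_sock);
+ * rcu_assign_pointer(peer->sock, ovpn_sock);
+ */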
diff --git a/drivers/net/ovpn/socket.h b/drivers/net/ovpn/socket.h
new file mode 100644
index 000000000000..00d856b1a5d8
--- /dev/null
+++ b/drivers/net/ovpn/socket.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_SOCK_H_
+#define _NET_OVPN_SOCK_H_
+
+#include <linux/net.h>
+#include <linux/kref.h>
+#include <net/sock.h>
+
+struct ovpn_priv;
+struct ovpn_peer;
+
+/**
+ * struct ovpn_socket - a kernel socket referenced in the ovpn code
+ * @ovpn: ovpn instance owning this socket (UDP only)
+ * @dev_tracker: reference tracker for associated dev (UDP only)
+ * @peer: unique peer transmitting over this socket (TCP only)
+ * @sock: the low level sock object
+ * @refcount: number of contexts currently referencing this object
+ * @work: member used to schedule release routine (it may block)
+ * @tcp_tx_work: work for deferring outgoing packet processing (TCP only)
+ */
+struct ovpn_socket {
+ union {
+ struct {
+ struct ovpn_priv *ovpn;
+ netdevice_tracker dev_tracker;
+ };
+ struct ovpn_peer *peer;
+ };
+
+ struct socket *sock;
+ struct kref refcount;
+ struct work_struct work;
+ struct work_struct tcp_tx_work;
+};
+
+struct ovpn_socket *ovpn_socket_new(struct socket *sock,
+ struct ovpn_peer *peer);
+void ovpn_socket_release(struct ovpn_peer *peer);
+
+#endif /* _NET_OVPN_SOCK_H_ */
diff --git a/drivers/net/ovpn/stats.c b/drivers/net/ovpn/stats.c
new file mode 100644
index 000000000000..d637143473bb
--- /dev/null
+++ b/drivers/net/ovpn/stats.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/atomic.h>
+
+#include "stats.h"
+
+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps)
+{
+ atomic64_set(&ps->rx.bytes, 0);
+ atomic64_set(&ps->rx.packets, 0);
+
+ atomic64_set(&ps->tx.bytes, 0);
+ atomic64_set(&ps->tx.packets, 0);
+}
diff --git a/drivers/net/ovpn/stats.h b/drivers/net/ovpn/stats.h
new file mode 100644
index 000000000000..53433d8b6c33
--- /dev/null
+++ b/drivers/net/ovpn/stats.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan <james@openvpn.net>
+ * Antonio Quartulli <antonio@openvpn.net>
+ * Lev Stipakov <lev@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_OVPNSTATS_H_
+#define _NET_OVPN_OVPNSTATS_H_
+
+/* one stat */
+struct ovpn_peer_stat {
+ atomic64_t bytes;
+ atomic64_t packets;
+};
+
+/* rx and tx stats combined */
+struct ovpn_peer_stats {
+ struct ovpn_peer_stat rx;
+ struct ovpn_peer_stat tx;
+};
+
+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps);
+
+static inline void ovpn_peer_stats_increment(struct ovpn_peer_stat *stat,
+ const unsigned int n)
+{
+ atomic64_add(n, &stat->bytes);
+ atomic64_inc(&stat->packets);
+}
+
+static inline void ovpn_peer_stats_increment_rx(struct ovpn_peer_stats *stats,
+ const unsigned int n)
+{
+ ovpn_peer_stats_increment(&stats->rx, n);
+}
+
+static inline void ovpn_peer_stats_increment_tx(struct ovpn_peer_stats *stats,
+ const unsigned int n)
+{
+ ovpn_peer_stats_increment(&stats->tx, n);
+}
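+
+/* Usage sketch: on successful RX of an n-byte packet a caller would
+ * account it with (vpn_stats is a hypothetical peer field here):
+ *
+ * ovpn_peer_stats_increment_rx(&peer->vpn_stats, skb->len);
+ */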
+
+#endif /* _NET_OVPN_OVPNSTATS_H_ */
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
new file mode 100644
index 000000000000..7c42d84987ad
--- /dev/null
+++ b/drivers/net/ovpn/tcp.c
@@ -0,0 +1,598 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/skbuff.h>
+#include <net/hotdata.h>
+#include <net/inet_common.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/transp_v6.h>
+#include <net/route.h>
+#include <trace/events/sock.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "skb.h"
+#include "tcp.h"
+
+#define OVPN_TCP_DEPTH_NESTING 2
+#if OVPN_TCP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
+#error "OVPN TCP requires its own lockdep subclass"
+#endif
+
+static struct proto ovpn_tcp_prot __ro_after_init;
+static struct proto_ops ovpn_tcp_ops __ro_after_init;
+static struct proto ovpn_tcp6_prot __ro_after_init;
+static struct proto_ops ovpn_tcp6_ops __ro_after_init;
+
+static int ovpn_tcp_parse(struct strparser *strp, struct sk_buff *skb)
+{
+ struct strp_msg *rxm = strp_msg(skb);
+ __be16 blen;
+ u16 len;
+ int err;
+
+ /* when packets are written to the TCP stream, they are prepended with
+ * two bytes indicating the actual packet size.
+ * Parse accordingly and return the actual size (including the size
+ * header)
+ */
+
+ if (skb->len < rxm->offset + 2)
+ return 0;
+
+ err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen));
+ if (err < 0)
+ return err;
+
+ len = be16_to_cpu(blen);
+ if (len < 2)
+ return -EINVAL;
+
+ return len + 2;
+}
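+
+/* Wire format sketch (illustrative): a 1400 byte packet is carried on
+ * the stream as a 16-bit big-endian length followed by the payload:
+ *
+ * +------+------+----------------------+
+ * | 0x05 | 0x78 | 1400 bytes of packet |
+ * +------+------+----------------------+
+ *
+ * for which ovpn_tcp_parse() returns 1402, the full record length.
+ */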
+
+/* queue skb for sending to userspace via recvmsg on the socket */
+static void ovpn_tcp_to_userspace(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ skb_set_owner_r(skb, sk);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ skb_queue_tail(&peer->tcp.user_queue, skb);
+ peer->tcp.sk_cb.sk_data_ready(sk);
+}
+
+static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb)
+{
+ struct ovpn_peer *peer = container_of(strp, struct ovpn_peer, tcp.strp);
+ struct strp_msg *msg = strp_msg(skb);
+ size_t pkt_len = msg->full_len - 2;
+ size_t off = msg->offset + 2;
+ u8 opcode;
+
+ /* ensure skb->data points to the beginning of the openvpn packet */
+ if (!pskb_pull(skb, off)) {
+ net_warn_ratelimited("%s: packet too small for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* strparser does not trim the skb for us, therefore we do it now */
+ if (pskb_trim(skb, pkt_len) != 0) {
+ net_warn_ratelimited("%s: trimming skb failed for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* we need the first 4 bytes of data to be accessible
+ * to extract the opcode and the key ID later on
+ */
+ if (!pskb_may_pull(skb, OVPN_OPCODE_SIZE)) {
+ net_warn_ratelimited("%s: packet too small to fetch opcode for peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto err;
+ }
+
+ /* DATA_V2 packets are handled in kernel, the rest goes to user space */
+ opcode = ovpn_opcode_from_skb(skb, 0);
+ if (unlikely(opcode != OVPN_DATA_V2)) {
+ if (opcode == OVPN_DATA_V1) {
+ net_warn_ratelimited("%s: DATA_V1 detected on the TCP stream\n",
+ netdev_name(peer->ovpn->dev));
+ goto err;
+ }
+
+ /* The packet size header must be there when sending the packet
+ * to userspace, therefore we put it back
+ */
+ skb_push(skb, 2);
+ ovpn_tcp_to_userspace(peer, strp->sk, skb);
+ return;
+ }
+
+ /* hold reference to peer as required by ovpn_recv().
+ *
+ * NOTE: in this context we should already be holding a reference to
+ * this peer, therefore ovpn_peer_hold() is not expected to fail
+ */
+ if (WARN_ON(!ovpn_peer_hold(peer)))
+ goto err;
+
+ ovpn_recv(peer, skb);
+ return;
+err:
+ dev_dstats_rx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
+}
+
+static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len)
+{
+ int err = 0, off, copied = 0, ret;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) {
+ rcu_read_unlock();
+ return -EBADF;
+ }
+ peer = sock->peer;
+ rcu_read_unlock();
+
+ skb = __skb_recv_datagram(sk, &peer->tcp.user_queue, flags, &off, &err);
+ if (!skb) {
+ if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) {
+ ret = 0;
+ goto out;
+ }
+ ret = err;
+ goto out;
+ }
+
+ copied = len;
+ if (copied > skb->len)
+ copied = skb->len;
+ else if (copied < skb->len)
+ msg->msg_flags |= MSG_TRUNC;
+
+ err = skb_copy_datagram_msg(skb, 0, msg, copied);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ ret = err;
+ goto out;
+ }
+
+ if (flags & MSG_TRUNC)
+ copied = skb->len;
+ kfree_skb(skb);
+ ret = copied;
+out:
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+ struct ovpn_peer *peer = ovpn_sock->peer;
+ struct socket *sock = ovpn_sock->sock;
+
+ strp_stop(&peer->tcp.strp);
+ skb_queue_purge(&peer->tcp.user_queue);
+
+ /* restore CBs that were saved in ovpn_sock_set_tcp_cb() */
+ sock->sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready;
+ sock->sk->sk_write_space = peer->tcp.sk_cb.sk_write_space;
+ sock->sk->sk_prot = peer->tcp.sk_cb.prot;
+ sock->sk->sk_socket->ops = peer->tcp.sk_cb.ops;
+
+ rcu_assign_sk_user_data(sock->sk, NULL);
+}
+
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock)
+{
+ struct ovpn_peer *peer = sock->peer;
+
+ /* NOTE: we don't wait for peer->tcp.defer_del_work to finish:
+ * either the worker is not running or this function
+ * was invoked by that worker.
+ */
+
+ cancel_work_sync(&sock->tcp_tx_work);
+ strp_done(&peer->tcp.strp);
+
+ skb_queue_purge(&peer->tcp.out_queue);
+ kfree_skb(peer->tcp.out_msg.skb);
+ peer->tcp.out_msg.skb = NULL;
+}
+
+static void ovpn_tcp_send_sock(struct ovpn_peer *peer, struct sock *sk)
+{
+ struct sk_buff *skb = peer->tcp.out_msg.skb;
+ int ret, flags;
+
+ if (!skb)
+ return;
+
+ if (peer->tcp.tx_in_progress)
+ return;
+
+ peer->tcp.tx_in_progress = true;
+
+ do {
+ flags = ovpn_skb_cb(skb)->nosignal ? MSG_NOSIGNAL : 0;
+ ret = skb_send_sock_locked_with_flags(sk, skb,
+ peer->tcp.out_msg.offset,
+ peer->tcp.out_msg.len,
+ flags);
+ if (unlikely(ret < 0)) {
+ if (ret == -EAGAIN)
+ goto out;
+
+ net_warn_ratelimited("%s: TCP error to peer %u: %d\n",
+ netdev_name(peer->ovpn->dev),
+ peer->id, ret);
+
+ /* in case of a TCP error we can't recover the VPN
+ * stream, therefore we abort the connection
+ */
+ ovpn_peer_hold(peer);
+ schedule_work(&peer->tcp.defer_del_work);
+
+ /* we bail out immediately and keep tx_in_progress set
+ * to true. This way we prevent more TX attempts
+ * which would lead to more invocations of
+ * schedule_work()
+ */
+ return;
+ }
+
+ peer->tcp.out_msg.len -= ret;
+ peer->tcp.out_msg.offset += ret;
+ } while (peer->tcp.out_msg.len > 0);
+
+ if (!peer->tcp.out_msg.len) {
+ preempt_disable();
+ dev_dstats_tx_add(peer->ovpn->dev, skb->len);
+ preempt_enable();
+ }
+
+ kfree_skb(peer->tcp.out_msg.skb);
+ peer->tcp.out_msg.skb = NULL;
+ peer->tcp.out_msg.len = 0;
+ peer->tcp.out_msg.offset = 0;
+
+out:
+ peer->tcp.tx_in_progress = false;
+}
+
+void ovpn_tcp_tx_work(struct work_struct *work)
+{
+ struct ovpn_socket *sock;
+
+ sock = container_of(work, struct ovpn_socket, tcp_tx_work);
+
+ lock_sock(sock->sock->sk);
+ if (sock->peer)
+ ovpn_tcp_send_sock(sock->peer, sock->sock->sk);
+ release_sock(sock->sock->sk);
+}
+
+static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
+ struct sk_buff *skb)
+{
+ if (peer->tcp.out_msg.skb)
+ ovpn_tcp_send_sock(peer, sk);
+
+ if (peer->tcp.out_msg.skb) {
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ return;
+ }
+
+ peer->tcp.out_msg.skb = skb;
+ peer->tcp.out_msg.len = skb->len;
+ peer->tcp.out_msg.offset = 0;
+ ovpn_tcp_send_sock(peer, sk);
+}
+
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+ struct sk_buff *skb)
+{
+ u16 len = skb->len;
+
+ *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len);
+
+ spin_lock_nested(&sock->sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
+ if (sock_owned_by_user(sock->sk)) {
+ if (skb_queue_len(&peer->tcp.out_queue) >=
+ READ_ONCE(net_hotdata.max_backlog)) {
+ dev_dstats_tx_dropped(peer->ovpn->dev);
+ kfree_skb(skb);
+ goto unlock;
+ }
+ __skb_queue_tail(&peer->tcp.out_queue, skb);
+ } else {
+ ovpn_tcp_send_sock_skb(peer, sock->sk, skb);
+ }
+unlock:
+ spin_unlock(&sock->sk->sk_lock.slock);
+}
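+
+/* Locking sketch for the TX path above (illustrative): the slock is
+ * taken with a dedicated lockdep subclass (OVPN_TCP_DEPTH_NESTING)
+ * since the transport socket may be locked while another socket lock
+ * is already held by the stack. If userspace currently owns the
+ * socket, packets are backlogged in tcp.out_queue (bounded by the
+ * netdev_max_backlog sysctl) and flushed by ovpn_tcp_release().
+ */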
+
+static void ovpn_tcp_release(struct sock *sk)
+{
+ struct sk_buff_head queue;
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock) {
+ rcu_read_unlock();
+ return;
+ }
+
+ peer = sock->peer;
+
+ /* during initialization this function is called before
+ * assigning sock->peer
+ */
+ if (unlikely(!peer || !ovpn_peer_hold(peer))) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ __skb_queue_head_init(&queue);
+ skb_queue_splice_init(&peer->tcp.out_queue, &queue);
+
+ while ((skb = __skb_dequeue(&queue)))
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+
+ peer->tcp.sk_cb.prot->release_cb(sk);
+ ovpn_peer_put(peer);
+}
+
+static int ovpn_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+ struct ovpn_socket *sock;
+ int ret, linear = PAGE_SIZE;
+ struct ovpn_peer *peer;
+ struct sk_buff *skb;
+
+ lock_sock(sk);
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) {
+ rcu_read_unlock();
+ release_sock(sk);
+ return -EIO;
+ }
+ rcu_read_unlock();
+ peer = sock->peer;
+
+ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL)) {
+ ret = -EOPNOTSUPP;
+ goto peer_free;
+ }
+
+ if (peer->tcp.out_msg.skb) {
+ ret = -EAGAIN;
+ goto peer_free;
+ }
+
+ if (size < linear)
+ linear = size;
+
+ skb = sock_alloc_send_pskb(sk, linear, size - linear,
+ msg->msg_flags & MSG_DONTWAIT, &ret, 0);
+ if (!skb) {
+ net_err_ratelimited("%s: skb alloc failed: %d\n",
+ netdev_name(peer->ovpn->dev), ret);
+ goto peer_free;
+ }
+
+ skb_put(skb, linear);
+ skb->len = size;
+ skb->data_len = size - linear;
+
+ ret = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+ if (ret) {
+ kfree_skb(skb);
+ net_err_ratelimited("%s: skb copy from iter failed: %d\n",
+ netdev_name(peer->ovpn->dev), ret);
+ goto peer_free;
+ }
+
+ ovpn_skb_cb(skb)->nosignal = msg->msg_flags & MSG_NOSIGNAL;
+ ovpn_tcp_send_sock_skb(peer, sk, skb);
+ ret = size;
+peer_free:
+ release_sock(sk);
+ ovpn_peer_put(peer);
+ return ret;
+}
+
+static int ovpn_tcp_disconnect(struct sock *sk, int flags)
+{
+ return -EBUSY;
+}
+
+static void ovpn_tcp_data_ready(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+
+ trace_sk_data_ready(sk);
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (likely(sock && sock->peer))
+ strp_data_ready(&sock->peer->tcp.strp);
+ rcu_read_unlock();
+}
+
+static void ovpn_tcp_write_space(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (likely(sock && sock->peer)) {
+ schedule_work(&sock->tcp_tx_work);
+ sock->peer->tcp.sk_cb.sk_write_space(sk);
+ }
+ rcu_read_unlock();
+}
+
+static void ovpn_tcp_build_protos(struct proto *new_prot,
+ struct proto_ops *new_ops,
+ const struct proto *orig_prot,
+ const struct proto_ops *orig_ops);
+
+static void ovpn_tcp_peer_del_work(struct work_struct *work)
+{
+ struct ovpn_peer *peer = container_of(work, struct ovpn_peer,
+ tcp.defer_del_work);
+
+ ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR);
+ ovpn_peer_put(peer);
+}
+
+/* Set TCP encapsulation callbacks */
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_peer *peer)
+{
+ struct socket *sock = ovpn_sock->sock;
+ struct strp_callbacks cb = {
+ .rcv_msg = ovpn_tcp_rcv,
+ .parse_msg = ovpn_tcp_parse,
+ };
+ int ret;
+
+ /* make sure no pre-existing encapsulation handler exists */
+ if (sock->sk->sk_user_data)
+ return -EBUSY;
+
+ /* only a fully connected socket is expected; connection setup
+ * should be handled in userspace
+ */
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ net_err_ratelimited("%s: provided TCP socket is not in ESTABLISHED state: %d\n",
+ netdev_name(peer->ovpn->dev),
+ sock->sk->sk_state);
+ return -EINVAL;
+ }
+
+ ret = strp_init(&peer->tcp.strp, sock->sk, &cb);
+ if (ret < 0) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return ret;
+ }
+
+ INIT_WORK(&peer->tcp.defer_del_work, ovpn_tcp_peer_del_work);
+
+ __sk_dst_reset(sock->sk);
+ skb_queue_head_init(&peer->tcp.user_queue);
+ skb_queue_head_init(&peer->tcp.out_queue);
+
+ /* save current CBs so that they can be restored upon socket release */
+ peer->tcp.sk_cb.sk_data_ready = sock->sk->sk_data_ready;
+ peer->tcp.sk_cb.sk_write_space = sock->sk->sk_write_space;
+ peer->tcp.sk_cb.prot = sock->sk->sk_prot;
+ peer->tcp.sk_cb.ops = sock->sk->sk_socket->ops;
+
+ /* assign our static CBs and prot/ops */
+ sock->sk->sk_data_ready = ovpn_tcp_data_ready;
+ sock->sk->sk_write_space = ovpn_tcp_write_space;
+
+ if (sock->sk->sk_family == AF_INET) {
+ sock->sk->sk_prot = &ovpn_tcp_prot;
+ sock->sk->sk_socket->ops = &ovpn_tcp_ops;
+ } else {
+ sock->sk->sk_prot = &ovpn_tcp6_prot;
+ sock->sk->sk_socket->ops = &ovpn_tcp6_ops;
+ }
+
+ /* avoid using task_frag */
+ sock->sk->sk_allocation = GFP_ATOMIC;
+ sock->sk->sk_use_task_frag = false;
+
+ /* enqueue the RX worker */
+ strp_check_rcv(&peer->tcp.strp);
+
+ return 0;
+}
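+
+/* Callback swap performed above, summarized (illustrative): the
+ * original sk callbacks and prot/ops are saved in peer->tcp.sk_cb and
+ * restored by ovpn_tcp_socket_detach(), while the ovpn variants
+ * intercept data-ready, write-space, sendmsg/recvmsg, poll and close
+ * on the encapsulated socket.
+ */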
+
+static void ovpn_tcp_close(struct sock *sk, long timeout)
+{
+ struct ovpn_socket *sock;
+ struct ovpn_peer *peer;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock || !sock->peer || !ovpn_peer_hold(sock->peer)) {
+ rcu_read_unlock();
+ return;
+ }
+ peer = sock->peer;
+ rcu_read_unlock();
+
+ ovpn_peer_del(sock->peer, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+ peer->tcp.sk_cb.prot->close(sk, timeout);
+ ovpn_peer_put(peer);
+}
+
+static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ __poll_t mask = datagram_poll(file, sock, wait);
+ struct ovpn_socket *ovpn_sock;
+
+ rcu_read_lock();
+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
+ if (ovpn_sock && ovpn_sock->peer &&
+ !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ rcu_read_unlock();
+
+ return mask;
+}
+
+static void ovpn_tcp_build_protos(struct proto *new_prot,
+ struct proto_ops *new_ops,
+ const struct proto *orig_prot,
+ const struct proto_ops *orig_ops)
+{
+ memcpy(new_prot, orig_prot, sizeof(*new_prot));
+ memcpy(new_ops, orig_ops, sizeof(*new_ops));
+ new_prot->recvmsg = ovpn_tcp_recvmsg;
+ new_prot->sendmsg = ovpn_tcp_sendmsg;
+ new_prot->disconnect = ovpn_tcp_disconnect;
+ new_prot->close = ovpn_tcp_close;
+ new_prot->release_cb = ovpn_tcp_release;
+ new_ops->poll = ovpn_tcp_poll;
+}
+
+/* Initialize TCP static objects */
+void __init ovpn_tcp_init(void)
+{
+ ovpn_tcp_build_protos(&ovpn_tcp_prot, &ovpn_tcp_ops, &tcp_prot,
+ &inet_stream_ops);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ ovpn_tcp_build_protos(&ovpn_tcp6_prot, &ovpn_tcp6_ops, &tcpv6_prot,
+ &inet6_stream_ops);
+#endif
+}
diff --git a/drivers/net/ovpn/tcp.h b/drivers/net/ovpn/tcp.h
new file mode 100644
index 000000000000..10aefa834cf3
--- /dev/null
+++ b/drivers/net/ovpn/tcp.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_TCP_H_
+#define _NET_OVPN_TCP_H_
+
+#include <linux/net.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "peer.h"
+#include "skb.h"
+#include "socket.h"
+
+void __init ovpn_tcp_init(void);
+
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_peer *peer);
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock);
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock);
+
+/* Prepare skb and enqueue it for sending to peer.
+ *
+ * Preparation consists of prepending the skb payload with its size.
+ * Required by the OpenVPN protocol in order to extract packets from
+ * the TCP stream on the receiver side.
+ */
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+ struct sk_buff *skb);
+void ovpn_tcp_tx_work(struct work_struct *work);
+
+#endif /* _NET_OVPN_TCP_H_ */
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
new file mode 100644
index 000000000000..aef8c0406ec9
--- /dev/null
+++ b/drivers/net/ovpn/udp.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/udp.h>
+#include <net/addrconf.h>
+#include <net/dst_cache.h>
+#include <net/route.h>
+#include <net/ipv6_stubs.h>
+#include <net/transp_v6.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "bind.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "socket.h"
+#include "udp.h"
+
+/* Retrieve the corresponding ovpn object from a UDP socket.
+ * rcu_read_lock must be held on entry.
+ */
+static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
+{
+ struct ovpn_socket *ovpn_sock;
+
+ if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP))
+ return NULL;
+
+ ovpn_sock = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!ovpn_sock))
+ return NULL;
+
+ /* make sure that sk matches our stored transport socket */
+ if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk))
+ return NULL;
+
+ return ovpn_sock;
+}
+
+/**
+ * ovpn_udp_encap_recv - Start processing a received UDP packet.
+ * @sk: socket over which the packet was received
+ * @skb: the received packet
+ *
+ * If the first byte of the payload is:
+ * - DATA_V2 the packet is accepted for further processing,
+ * - DATA_V1 the packet is dropped as not supported,
+ * - anything else the packet is forwarded to the UDP stack for
+ * delivery to user space.
+ *
+ * Return:
+ * 0 if skb was consumed or dropped
+ * >0 if skb should be passed up to userspace as UDP (packet not consumed)
+ * <0 if skb should be resubmitted as proto -N (packet not consumed)
+ */
+static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct ovpn_socket *ovpn_sock;
+ struct ovpn_priv *ovpn;
+ struct ovpn_peer *peer;
+ u32 peer_id;
+ u8 opcode;
+
+ ovpn_sock = ovpn_socket_from_udp_sock(sk);
+ if (unlikely(!ovpn_sock)) {
+ net_err_ratelimited("ovpn: %s invoked on non ovpn socket\n",
+ __func__);
+ goto drop_noovpn;
+ }
+
+ ovpn = ovpn_sock->ovpn;
+ if (unlikely(!ovpn)) {
+ net_err_ratelimited("ovpn: cannot obtain ovpn object from UDP socket\n");
+ goto drop_noovpn;
+ }
+
+ /* Make sure the first 4 bytes of the skb data buffer after the UDP
+ * header are accessible.
+ * They are required to fetch the OP code, the key ID and the peer ID.
+ */
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) +
+ OVPN_OPCODE_SIZE))) {
+ net_dbg_ratelimited("%s: packet too small from UDP socket\n",
+ netdev_name(ovpn->dev));
+ goto drop;
+ }
+
+ opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr));
+ if (unlikely(opcode != OVPN_DATA_V2)) {
+ /* DATA_V1 is not supported */
+ if (opcode == OVPN_DATA_V1)
+ goto drop;
+
+ /* unknown or control packet: let it bubble up to userspace */
+ return 1;
+ }
+
+ peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr));
+ /* some OpenVPN server implementations send data packets with the
+ * peer-id set to UNDEF. In this case we skip the peer lookup by
+ * peer-id and try with the transport address instead
+ */
+ if (peer_id == OVPN_PEER_ID_UNDEF)
+ peer = ovpn_peer_get_by_transp_addr(ovpn, skb);
+ else
+ peer = ovpn_peer_get_by_id(ovpn, peer_id);
+
+ if (unlikely(!peer))
+ goto drop;
+
+ /* pop off outer UDP header */
+ __skb_pull(skb, sizeof(struct udphdr));
+ ovpn_recv(peer, skb);
+ return 0;
+
+drop:
+ dev_dstats_rx_dropped(ovpn->dev);
+drop_noovpn:
+ kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * ovpn_udp4_output - send IPv4 packet over udp socket
+ * @peer: the destination peer
+ * @bind: the binding related to the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
+ struct dst_cache *cache, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct rtable *rt;
+ struct flowi4 fl = {
+ .saddr = bind->local.ipv4.s_addr,
+ .daddr = bind->remote.in4.sin_addr.s_addr,
+ .fl4_sport = inet_sk(sk)->inet_sport,
+ .fl4_dport = bind->remote.in4.sin_port,
+ .flowi4_proto = sk->sk_protocol,
+ .flowi4_mark = sk->sk_mark,
+ };
+ int ret;
+
+ local_bh_disable();
+ rt = dst_cache_get_ip4(cache, &fl.saddr);
+ if (rt)
+ goto transmit;
+
+ if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr,
+ RT_SCOPE_HOST))) {
+ /* we may end up here when the cached address is not usable
+ * anymore. In this case we reset address/cache and perform a
+ * new lookup
+ */
+ fl.saddr = 0;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv4.s_addr = 0;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+ }
+
+ rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+ if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) {
+ fl.saddr = 0;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv4.s_addr = 0;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+
+ rt = ip_route_output_flow(sock_net(sk), &fl, sk);
+ }
+
+ if (IS_ERR(rt)) {
+ ret = PTR_ERR(rt);
+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
+ netdev_name(peer->ovpn->dev),
+ &bind->remote.in4,
+ ret);
+ goto err;
+ }
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+
+transmit:
+ udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0,
+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
+ fl.fl4_dport, false, sk->sk_no_check_tx);
+ ret = 0;
+err:
+ local_bh_enable();
+ return ret;
+}
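+
+/* The dst_cache pattern used above, summarized (illustrative):
+ *
+ * rt = dst_cache_get_ip4(cache, &fl.saddr); - fast path
+ * if (!rt) {
+ * rt = ip_route_output_flow(net, &fl, sk); - slow path
+ * dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
+ * }
+ *
+ * The cache is reset whenever the cached local address is no longer
+ * valid on the host, forcing a fresh route lookup.
+ */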
+
+#if IS_ENABLED(CONFIG_IPV6)
+/**
+ * ovpn_udp6_output - send IPv6 packet over udp socket
+ * @peer: the destination peer
+ * @bind: the binding related to the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp6_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
+ struct dst_cache *cache, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst;
+ int ret;
+
+ struct flowi6 fl = {
+ .saddr = bind->local.ipv6,
+ .daddr = bind->remote.in6.sin6_addr,
+ .fl6_sport = inet_sk(sk)->inet_sport,
+ .fl6_dport = bind->remote.in6.sin6_port,
+ .flowi6_proto = sk->sk_protocol,
+ .flowi6_mark = sk->sk_mark,
+ .flowi6_oif = bind->remote.in6.sin6_scope_id,
+ };
+
+ local_bh_disable();
+ dst = dst_cache_get_ip6(cache, &fl.saddr);
+ if (dst)
+ goto transmit;
+
+ if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) {
+ /* we may end up here when the cached address is not usable
+ * anymore. In this case we reset address/cache and perform a
+ * new lookup
+ */
+ fl.saddr = in6addr_any;
+ spin_lock_bh(&peer->lock);
+ bind->local.ipv6 = in6addr_any;
+ spin_unlock_bh(&peer->lock);
+ dst_cache_reset(cache);
+ }
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
+ netdev_name(peer->ovpn->dev),
+ &bind->remote.in6, ret);
+ goto err;
+ }
+ dst_cache_set_ip6(cache, dst, &fl.saddr);
+
+transmit:
+ /* user IPv6 packets may be larger than the transport interface
+ * MTU (after encapsulation). However, since they are locally
+ * generated, we should ensure they get fragmented.
+ * Setting the ignore_df flag to 1 will instruct ip6_fragment() to
+ * fragment packets if needed.
+ *
+ * NOTE: this is not needed for IPv4 because we pass df=0 to
+ * udp_tunnel_xmit_skb()
+ */
+ skb->ignore_df = 1;
+ udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0,
+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
+ fl.fl6_dport, udp_get_no_check6_tx(sk));
+ ret = 0;
+err:
+ local_bh_enable();
+ return ret;
+}
+#endif
+
+/**
+ * ovpn_udp_output - transmit skb using udp-tunnel
+ * @peer: the destination peer
+ * @cache: dst cache
+ * @sk: the socket to send the packet over
+ * @skb: the packet to send
+ *
+ * rcu_read_lock should be held on entry.
+ * On return, the skb is consumed.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp_output(struct ovpn_peer *peer, struct dst_cache *cache,
+ struct sock *sk, struct sk_buff *skb)
+{
+ struct ovpn_bind *bind;
+ int ret;
+
+ /* set sk to null if skb is already orphaned */
+ if (!skb->destructor)
+ skb->sk = NULL;
+
+ rcu_read_lock();
+ bind = rcu_dereference(peer->bind);
+ if (unlikely(!bind)) {
+ net_warn_ratelimited("%s: no bind for remote peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ switch (bind->remote.in4.sin_family) {
+ case AF_INET:
+ ret = ovpn_udp4_output(peer, bind, cache, sk, skb);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ ret = ovpn_udp6_output(peer, bind, cache, sk, skb);
+ break;
+#endif
+ default:
+ ret = -EAFNOSUPPORT;
+ break;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * ovpn_udp_send_skb - prepare skb and send it over via UDP
+ * @peer: the destination peer
+ * @sock: the RCU protected peer socket
+ * @skb: the packet to send
+ */
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+ struct sk_buff *skb)
+{
+ int ret = -1;
+
+ skb->dev = peer->ovpn->dev;
+ /* no checksum performed at this layer */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* get socket info */
+ if (unlikely(!sock)) {
+ net_warn_ratelimited("%s: no sock for remote peer %u\n",
+ netdev_name(peer->ovpn->dev), peer->id);
+ goto out;
+ }
+
+ /* crypto layer -> transport (UDP) */
+ ret = ovpn_udp_output(peer, &peer->dst_cache, sock->sk, skb);
+out:
+ if (unlikely(ret < 0)) {
+ kfree_skb(skb);
+ return;
+ }
+}
+
+static void ovpn_udp_encap_destroy(struct sock *sk)
+{
+ struct ovpn_socket *sock;
+ struct ovpn_priv *ovpn;
+
+ rcu_read_lock();
+ sock = rcu_dereference_sk_user_data(sk);
+ if (!sock || !sock->ovpn) {
+ rcu_read_unlock();
+ return;
+ }
+ ovpn = sock->ovpn;
+ rcu_read_unlock();
+
+ ovpn_peers_free(ovpn, sk, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+}
+
+/**
+ * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
+ * @ovpn_sock: socket to configure
+ * @ovpn: the ovpn instance to link
+ *
+ * After invoking this function, the sock will be controlled by ovpn so that
+ * any incoming packet may be processed by ovpn first.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_priv *ovpn)
+{
+ struct udp_tunnel_sock_cfg cfg = {
+ .encap_type = UDP_ENCAP_OVPNINUDP,
+ .encap_rcv = ovpn_udp_encap_recv,
+ .encap_destroy = ovpn_udp_encap_destroy,
+ };
+ struct socket *sock = ovpn_sock->sock;
+ struct ovpn_socket *old_data;
+ int ret;
+
+ /* make sure no pre-existing encapsulation handler exists */
+ rcu_read_lock();
+ old_data = rcu_dereference_sk_user_data(sock->sk);
+ if (!old_data) {
+ /* socket is currently unused - we can take it */
+ rcu_read_unlock();
+ setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg);
+ return 0;
+ }
+
+ /* socket is in use. We need to understand if it's owned by this ovpn
+ * instance or by something else.
+ * In the former case, we can increase the refcounter and happily
+ * use it, because the same UDP socket is expected to be shared among
+ * different peers.
+ *
+ * Unlike TCP, a single UDP socket can be used to talk to many remote
+ * hosts and therefore openvpn instantiates only one for all its peers
+ */
+ if ((READ_ONCE(udp_sk(sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
+ old_data->ovpn == ovpn) {
+ netdev_dbg(ovpn->dev,
+ "provided socket already owned by this interface\n");
+ ret = -EALREADY;
+ } else {
+ netdev_dbg(ovpn->dev,
+ "provided socket already taken by other user\n");
+ ret = -EBUSY;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/**
+ * ovpn_udp_socket_detach - clean udp-tunnel status for this socket
+ * @ovpn_sock: the socket to clean
+ */
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+ struct udp_tunnel_sock_cfg cfg = { };
+
+ setup_udp_tunnel_sock(sock_net(ovpn_sock->sock->sk), ovpn_sock->sock,
+ &cfg);
+}
diff --git a/drivers/net/ovpn/udp.h b/drivers/net/ovpn/udp.h
new file mode 100644
index 000000000000..9994eb6e0428
--- /dev/null
+++ b/drivers/net/ovpn/udp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli <antonio@openvpn.net>
+ */
+
+#ifndef _NET_OVPN_UDP_H_
+#define _NET_OVPN_UDP_H_
+
+#include <net/sock.h>
+
+struct ovpn_peer;
+struct ovpn_priv;
+struct socket;
+
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+ struct ovpn_priv *ovpn);
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock);
+
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+ struct sk_buff *skb);
+
+#endif /* _NET_OVPN_UDP_H_ */
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index f873a92d2445..28e6bc4a1f14 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -245,30 +245,21 @@ static int __net_init pfcp_net_init(struct net *net)
return 0;
}
-static void __net_exit pfcp_net_exit(struct net *net)
+static void __net_exit pfcp_net_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
struct pfcp_dev *pfcp, *pfcp_next;
- struct net_device *dev;
- LIST_HEAD(list);
-
- rtnl_lock();
- for_each_netdev(net, dev)
- if (dev->rtnl_link_ops == &pfcp_link_ops)
- pfcp_dellink(dev, &list);
list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
- pfcp_dellink(pfcp->dev, &list);
-
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ pfcp_dellink(pfcp->dev, dev_to_kill);
}
static struct pernet_operations pfcp_net_ops = {
- .init = pfcp_net_init,
- .exit = pfcp_net_exit,
- .id = &pfcp_net_id,
- .size = sizeof(struct pfcp_net),
+ .init = pfcp_net_init,
+ .exit_rtnl = pfcp_net_exit_rtnl,
+ .id = &pfcp_net_id,
+ .size = sizeof(struct pfcp_net),
};
static int __init pfcp_init(void)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d29f9f7fd2e1..53dad2482026 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,6 @@
config PHYLINK
tristate
- depends on NETDEVICES
select PHYLIB
select SWPHY
help
@@ -15,9 +14,7 @@ config PHYLINK
menuconfig PHYLIB
tristate "PHY Device support and infrastructure"
- depends on NETDEVICES
- select MDIO_DEVICE
- select MDIO_DEVRES
+ select MDIO_BUS
help
Ethernet controllers are usually attached to PHY
devices. This option provides infrastructure for
@@ -79,6 +76,18 @@ config SFP
comment "MII PHY device drivers"
+config AS21XXX_PHY
+ tristate "Aeonsemi AS21xxx PHYs"
+ help
+ Currently supports the Aeonsemi AS21xxx PHYs.
+
+ These are 10G C45 PHYs that all require a generic firmware to be
+ loaded.
+
+ Supported PHYs are AS21011JB1, AS21011PB1, AS21010JB1, AS21010PB1,
+ AS21511JB1, AS21511PB1, AS21510JB1, AS21510PB1, AS21210JB1 and
+ AS21210PB1, which all register with the PHY ID 0x7500 0x9410
+ before the firmware is loaded.
+
config AIR_EN8811H_PHY
tristate "Airoha EN8811H 2.5 Gigabit PHY"
help
@@ -266,6 +275,18 @@ config MAXLINEAR_GPHY
Support for the Maxlinear GPY115, GPY211, GPY212, GPY215,
GPY241, GPY245 PHYs.
+config MAXLINEAR_86110_PHY
+ tristate "MaxLinear MXL86110 PHY support"
+ help
+ Support for the MaxLinear MXL86110 Gigabit Ethernet
+ Physical Layer transceiver.
+ The MXL86110 is commonly used in networking equipment such as
+ routers, switches, and embedded systems, providing the
+ physical interface for 10/100/1000 Mbps Ethernet connections
+ over copper media.
+ If you are using a board with the MXL86110 PHY connected to your
+ Ethernet MAC, you should enable this option.
+
source "drivers/net/phy/mediatek/Kconfig"
config MICREL_PHY
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 23ce205ae91d..7827609e9032 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -3,30 +3,22 @@
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
linkmode.o phy_link_topology.o \
- phy_package.o phy_caps.o
+ phy_package.o phy_caps.o mdio_bus_provider.o
mdio-bus-y += mdio_bus.o mdio_device.o
-ifdef CONFIG_MDIO_DEVICE
-obj-y += mdio-boardinfo.o
-endif
-
-# PHYLIB implies MDIO_DEVICE, in that case, we have a bunch of circular
-# dependencies that does not make it possible to split mdio-bus objects into a
-# dedicated loadable module, so we bundle them all together into libphy.ko
ifdef CONFIG_PHYLIB
-libphy-y += $(mdio-bus-y)
-# the stubs are built-in whenever PHYLIB is built-in or module
-obj-y += stubs.o
-else
-obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o
+# built-in whenever PHYLIB is built-in or module
+obj-y += stubs.o mdio-boardinfo.o
endif
-obj-$(CONFIG_MDIO_DEVRES) += mdio_devres.o
+
libphy-$(CONFIG_SWPHY) += swphy.o
libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
libphy-$(CONFIG_OPEN_ALLIANCE_HELPERS) += open_alliance_helpers.o
+obj-$(CONFIG_MDIO_BUS) += mdio-bus.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
+obj-$(CONFIG_PHYLIB) += mdio_devres.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
@@ -40,6 +32,7 @@ obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AMCC_QT2025_PHY) += qt2025.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
+obj-$(CONFIG_AS21XXX_PHY) += as21xxx.o
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
else
@@ -75,6 +68,7 @@ obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_88Q2XXX_PHY) += marvell-88q2xxx.o
obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o
obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o
+obj-$(CONFIG_MAXLINEAR_86110_PHY) += mxl-86110.o
obj-y += mediatek/
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
index e9fd24cb7270..57fbd8df9438 100644
--- a/drivers/net/phy/air_en8811h.c
+++ b/drivers/net/phy/air_en8811h.c
@@ -11,6 +11,7 @@
* Copyright (C) 2023 Airoha Technology Corp.
*/
+#include <linux/clk-provider.h>
#include <linux/phy.h>
#include <linux/firmware.h>
#include <linux/property.h>
@@ -115,6 +116,11 @@
#define EN8811H_GPIO_OUTPUT 0xcf8b8
#define EN8811H_GPIO_OUTPUT_345 (BIT(3) | BIT(4) | BIT(5))
+#define EN8811H_HWTRAP1 0xcf914
+#define EN8811H_HWTRAP1_CKO BIT(12)
+#define EN8811H_CLK_CGM 0xcf958
+#define EN8811H_CLK_CGM_CKO BIT(26)
+
#define EN8811H_FW_CTRL_1 0x0f0018
#define EN8811H_FW_CTRL_1_START 0x0
#define EN8811H_FW_CTRL_1_FINISH 0x1
@@ -142,10 +148,15 @@ struct led {
unsigned long state;
};
+#define clk_hw_to_en8811h_priv(_hw) \
+ container_of(_hw, struct en8811h_priv, hw)
+
struct en8811h_priv {
- u32 firmware_version;
- bool mcu_needs_restart;
- struct led led[EN8811H_LED_COUNT];
+ u32 firmware_version;
+ bool mcu_needs_restart;
+ struct led led[EN8811H_LED_COUNT];
+ struct clk_hw hw;
+ struct phy_device *phydev;
};
enum {
@@ -806,6 +817,86 @@ static int en8811h_led_hw_is_supported(struct phy_device *phydev, u8 index,
return 0;
};
+static unsigned long en8811h_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+ u32 pbus_value;
+ int ret;
+
+ ret = air_buckpbus_reg_read(phydev, EN8811H_HWTRAP1, &pbus_value);
+ if (ret < 0)
+ return ret;
+
+ return (pbus_value & EN8811H_HWTRAP1_CKO) ? 50000000 : 25000000;
+}
+
+static int en8811h_clk_enable(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+
+ return air_buckpbus_reg_modify(phydev, EN8811H_CLK_CGM,
+ EN8811H_CLK_CGM_CKO,
+ EN8811H_CLK_CGM_CKO);
+}
+
+static void en8811h_clk_disable(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+
+ air_buckpbus_reg_modify(phydev, EN8811H_CLK_CGM,
+ EN8811H_CLK_CGM_CKO, 0);
+}
+
+static int en8811h_clk_is_enabled(struct clk_hw *hw)
+{
+ struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw);
+ struct phy_device *phydev = priv->phydev;
+ u32 pbus_value;
+ int ret;
+
+ ret = air_buckpbus_reg_read(phydev, EN8811H_CLK_CGM, &pbus_value);
+ if (ret < 0)
+ return ret;
+
+ return (pbus_value & EN8811H_CLK_CGM_CKO);
+}
+
+static const struct clk_ops en8811h_clk_ops = {
+ .recalc_rate = en8811h_clk_recalc_rate,
+ .enable = en8811h_clk_enable,
+ .disable = en8811h_clk_disable,
+ .is_enabled = en8811h_clk_is_enabled,
+};
+
+static int en8811h_clk_provider_setup(struct device *dev, struct clk_hw *hw)
+{
+ struct clk_init_data init;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-cko",
+ fwnode_get_name(dev_fwnode(dev)));
+ if (!init.name)
+ return -ENOMEM;
+
+ init.ops = &en8811h_clk_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
static int en8811h_probe(struct phy_device *phydev)
{
struct en8811h_priv *priv;
@@ -838,6 +929,12 @@ static int en8811h_probe(struct phy_device *phydev)
return ret;
}
+ priv->phydev = phydev;
+ /* Co-Clock Output */
+ ret = en8811h_clk_provider_setup(&phydev->mdio.dev, &priv->hw);
+ if (ret)
+ return ret;
+
/* Configure led gpio pins as output */
ret = air_buckpbus_reg_modify(phydev, EN8811H_GPIO_OUTPUT,
EN8811H_GPIO_OUTPUT_345,
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 08b1c9cc902b..77a48635d7bf 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -516,8 +516,7 @@ static int aqr105_read_status(struct phy_device *phydev)
if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
return 0;
- /**
- * The status register is not immediately correct on line side link up.
+ /* The status register is not immediately correct on line side link up.
* Poll periodically until it reflects the correct ON state.
* Only return fail for read error, timeout defaults to OFF state.
*/
@@ -634,8 +633,7 @@ static int aqr107_read_status(struct phy_device *phydev)
if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
return 0;
- /**
- * The status register is not immediately correct on line side link up.
+ /* The status register is not immediately correct on line side link up.
* Poll periodically until it reflects the correct ON state.
* Only return fail for read error, timeout defaults to OFF state.
*/
diff --git a/drivers/net/phy/as21xxx.c b/drivers/net/phy/as21xxx.c
new file mode 100644
index 000000000000..92697f43087d
--- /dev/null
+++ b/drivers/net/phy/as21xxx.c
@@ -0,0 +1,1087 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Aeonsemi AS21xxx PHY Driver
+ *
+ * Author: Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#define VEND1_GLB_REG_CPU_RESET_ADDR_LO_BASEADDR 0x3
+#define VEND1_GLB_REG_CPU_RESET_ADDR_HI_BASEADDR 0x4
+
+#define VEND1_GLB_REG_CPU_CTRL 0xe
+#define VEND1_GLB_CPU_CTRL_MASK GENMASK(4, 0)
+#define VEND1_GLB_CPU_CTRL_LED_POLARITY_MASK GENMASK(12, 8)
+#define VEND1_GLB_CPU_CTRL_LED_POLARITY(_n) FIELD_PREP(VEND1_GLB_CPU_CTRL_LED_POLARITY_MASK, \
+ BIT(_n))
+
+#define VEND1_FW_START_ADDR 0x100
+
+#define VEND1_GLB_REG_MDIO_INDIRECT_ADDRCMD 0x101
+#define VEND1_GLB_REG_MDIO_INDIRECT_LOAD 0x102
+
+#define VEND1_GLB_REG_MDIO_INDIRECT_STATUS 0x103
+
+#define VEND1_PTP_CLK 0x142
+#define VEND1_PTP_CLK_EN BIT(6)
+
+/* 5 LEDs at a step of 0x20
+ * FE: Fast-Ethernet (10/100)
+ * GE: Gigabit-Ethernet (1000)
+ * NG: New-Generation (2500/5000/10000)
+ */
+#define VEND1_LED_REG(_n) (0x1800 + ((_n) * 0x10))
+#define VEND1_LED_REG_A_EVENT GENMASK(15, 11)
+#define VEND1_LED_CONF 0x1881
+#define VEND1_LED_CONFG_BLINK GENMASK(7, 0)
+
+#define VEND1_SPEED_STATUS 0x4002
+#define VEND1_SPEED_MASK GENMASK(7, 0)
+#define VEND1_SPEED_10000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x3)
+#define VEND1_SPEED_5000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x5)
+#define VEND1_SPEED_2500 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x9)
+#define VEND1_SPEED_1000 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x10)
+#define VEND1_SPEED_100 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x20)
+#define VEND1_SPEED_10 FIELD_PREP_CONST(VEND1_SPEED_MASK, 0x0)
+
+#define VEND1_IPC_CMD 0x5801
+#define AEON_IPC_CMD_PARITY BIT(15)
+#define AEON_IPC_CMD_SIZE GENMASK(10, 6)
+#define AEON_IPC_CMD_OPCODE GENMASK(5, 0)
+
+#define IPC_CMD_NOOP 0x0 /* Do nothing */
+#define IPC_CMD_INFO 0x1 /* Get Firmware Version */
+#define IPC_CMD_SYS_CPU 0x2 /* SYS_CPU */
+#define IPC_CMD_BULK_DATA 0xa /* Pass bulk data in ipc registers. */
+#define IPC_CMD_BULK_WRITE 0xc /* Write bulk data to memory */
+#define IPC_CMD_CFG_PARAM 0x1a /* Write config parameters to memory */
+#define IPC_CMD_NG_TESTMODE 0x1b /* Set NG test mode and tone */
+#define IPC_CMD_TEMP_MON 0x15 /* Temperature monitoring function */
+#define IPC_CMD_SET_LED 0x23 /* Set led */
+
+#define VEND1_IPC_STS 0x5802
+#define AEON_IPC_STS_PARITY BIT(15)
+#define AEON_IPC_STS_SIZE GENMASK(14, 10)
+#define AEON_IPC_STS_OPCODE GENMASK(9, 4)
+#define AEON_IPC_STS_STATUS GENMASK(3, 0)
+#define AEON_IPC_STS_STATUS_RCVD FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x1)
+#define AEON_IPC_STS_STATUS_PROCESS FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x2)
+#define AEON_IPC_STS_STATUS_SUCCESS FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x4)
+#define AEON_IPC_STS_STATUS_ERROR FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0x8)
+#define AEON_IPC_STS_STATUS_BUSY FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0xe)
+#define AEON_IPC_STS_STATUS_READY FIELD_PREP_CONST(AEON_IPC_STS_STATUS, 0xf)
+
+#define VEND1_IPC_DATA0 0x5808
+#define VEND1_IPC_DATA1 0x5809
+#define VEND1_IPC_DATA2 0x580a
+#define VEND1_IPC_DATA3 0x580b
+#define VEND1_IPC_DATA4 0x580c
+#define VEND1_IPC_DATA5 0x580d
+#define VEND1_IPC_DATA6 0x580e
+#define VEND1_IPC_DATA7 0x580f
+#define VEND1_IPC_DATA(_n) (VEND1_IPC_DATA0 + (_n))
+
+/* Sub command of CMD_INFO */
+#define IPC_INFO_VERSION 0x1
+
+/* Sub command of CMD_SYS_CPU */
+#define IPC_SYS_CPU_REBOOT 0x3
+#define IPC_SYS_CPU_IMAGE_OFST 0x4
+#define IPC_SYS_CPU_IMAGE_CHECK 0x5
+#define IPC_SYS_CPU_PHY_ENABLE 0x6
+
+/* Sub command of CMD_CFG_PARAM */
+#define IPC_CFG_PARAM_DIRECT 0x4
+
+/* CFG DIRECT sub command */
+#define IPC_CFG_PARAM_DIRECT_NG_PHYCTRL 0x1
+#define IPC_CFG_PARAM_DIRECT_CU_AN 0x2
+#define IPC_CFG_PARAM_DIRECT_SDS_PCS 0x3
+#define IPC_CFG_PARAM_DIRECT_AUTO_EEE 0x4
+#define IPC_CFG_PARAM_DIRECT_SDS_PMA 0x5
+#define IPC_CFG_PARAM_DIRECT_DPC_RA 0x6
+#define IPC_CFG_PARAM_DIRECT_DPC_PKT_CHK 0x7
+#define IPC_CFG_PARAM_DIRECT_DPC_SDS_WAIT_ETH 0x8
+#define IPC_CFG_PARAM_DIRECT_WDT 0x9
+#define IPC_CFG_PARAM_DIRECT_SDS_RESTART_AN 0x10
+#define IPC_CFG_PARAM_DIRECT_TEMP_MON 0x11
+#define IPC_CFG_PARAM_DIRECT_WOL 0x12
+
+/* Sub command of CMD_TEMP_MON */
+#define IPC_CMD_TEMP_MON_GET 0x4
+
+#define AS21XXX_MDIO_AN_C22 0xffe0
+
+#define PHY_ID_AS21XXX 0x75009410
+/* AS21xxx ID Legend
+ * AS21x1xxB1
+ * ^ ^^
+ * | |J: Supports SyncE/PTP
+ * | |P: No SyncE/PTP support
+ * | 1: Supports 2nd Serdes
+ * | 2: No 2nd Serdes support
+ * 0: 10G, 5G, 2.5G
+ * 5: 5G, 2.5G
+ * 2: 2.5G
+ */
+#define PHY_ID_AS21011JB1 0x75009402
+#define PHY_ID_AS21011PB1 0x75009412
+#define PHY_ID_AS21010JB1 0x75009422
+#define PHY_ID_AS21010PB1 0x75009432
+#define PHY_ID_AS21511JB1 0x75009442
+#define PHY_ID_AS21511PB1 0x75009452
+#define PHY_ID_AS21510JB1 0x75009462
+#define PHY_ID_AS21510PB1 0x75009472
+#define PHY_ID_AS21210JB1 0x75009482
+#define PHY_ID_AS21210PB1 0x75009492
+#define PHY_VENDOR_AEONSEMI 0x75009400
+
+#define AEON_MAX_LEDS 5
+#define AEON_IPC_DELAY 10000
+#define AEON_IPC_TIMEOUT (AEON_IPC_DELAY * 100)
+#define AEON_IPC_DATA_NUM_REGISTERS 8
+#define AEON_IPC_DATA_MAX (AEON_IPC_DATA_NUM_REGISTERS * sizeof(u16))
+
+#define AEON_BOOT_ADDR 0x1000
+#define AEON_CPU_BOOT_ADDR 0x2000
+#define AEON_CPU_CTRL_FW_LOAD (BIT(4) | BIT(2) | BIT(1) | BIT(0))
+#define AEON_CPU_CTRL_FW_START BIT(0)
+
+enum as21xxx_led_event {
+ VEND1_LED_REG_A_EVENT_ON_10 = 0x0,
+ VEND1_LED_REG_A_EVENT_ON_100,
+ VEND1_LED_REG_A_EVENT_ON_1000,
+ VEND1_LED_REG_A_EVENT_ON_2500,
+ VEND1_LED_REG_A_EVENT_ON_5000,
+ VEND1_LED_REG_A_EVENT_ON_10000,
+ VEND1_LED_REG_A_EVENT_ON_FE_GE,
+ VEND1_LED_REG_A_EVENT_ON_NG,
+ VEND1_LED_REG_A_EVENT_ON_FULL_DUPLEX,
+ VEND1_LED_REG_A_EVENT_ON_COLLISION,
+ VEND1_LED_REG_A_EVENT_BLINK_TX,
+ VEND1_LED_REG_A_EVENT_BLINK_RX,
+ VEND1_LED_REG_A_EVENT_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_LINK,
+ VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_RX,
+ VEND1_LED_REG_A_EVENT_ON_FE_GE_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_NG_BLINK_ACT,
+ VEND1_LED_REG_A_EVENT_ON_NG_BLINK_FE_GE,
+ VEND1_LED_REG_A_EVENT_ON_FD_BLINK_COLLISION,
+ VEND1_LED_REG_A_EVENT_ON,
+ VEND1_LED_REG_A_EVENT_OFF,
+};
+
+struct as21xxx_led_pattern_info {
+ unsigned int pattern;
+ u16 val;
+};
+
+struct as21xxx_priv {
+ bool parity_status;
+ /* Protect concurrent IPC access */
+ struct mutex ipc_lock;
+};
+
+static struct as21xxx_led_pattern_info as21xxx_led_supported_pattern[] = {
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10),
+ .val = VEND1_LED_REG_A_EVENT_ON_10
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_100),
+ .val = VEND1_LED_REG_A_EVENT_ON_100
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_1000),
+ .val = VEND1_LED_REG_A_EVENT_ON_1000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500),
+ .val = VEND1_LED_REG_A_EVENT_ON_2500
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_5000),
+ .val = VEND1_LED_REG_A_EVENT_ON_5000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_10000
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000),
+ .val = VEND1_LED_REG_A_EVENT_ON_FE_GE
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_NG
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_FULL_DUPLEX),
+ .val = VEND1_LED_REG_A_EVENT_ON_FULL_DUPLEX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_TX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_TX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_RX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_LINK_BLINK_RX
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_FE_GE_BLINK_ACT
+ },
+ {
+ .pattern = BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX),
+ .val = VEND1_LED_REG_A_EVENT_ON_NG_BLINK_ACT
+ }
+};
+
+static int aeon_firmware_boot(struct phy_device *phydev, const u8 *data,
+ size_t size)
+{
+ int i, ret;
+ u16 val;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLB_REG_CPU_CTRL,
+ VEND1_GLB_CPU_CTRL_MASK, AEON_CPU_CTRL_FW_LOAD);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_FW_START_ADDR,
+ AEON_BOOT_ADDR);
+ if (ret)
+ return ret;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_ADDRCMD,
+ 0x3ffc, 0xc000);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_STATUS);
+ if (val > 1) {
+ phydev_err(phydev, "wrong origin mdio_indirect_status: %x\n", val);
+ return -EINVAL;
+ }
+
+	/* The firmware blob is always a multiple of u16 in size */
+ for (i = 0; i < size; i += 2) {
+ val = data[i + 1] << 8 | data[i];
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_MDIO_INDIRECT_LOAD, val);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_RESET_ADDR_LO_BASEADDR,
+ lower_16_bits(AEON_CPU_BOOT_ADDR));
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_RESET_ADDR_HI_BASEADDR,
+ upper_16_bits(AEON_CPU_BOOT_ADDR));
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLB_REG_CPU_CTRL,
+ VEND1_GLB_CPU_CTRL_MASK, AEON_CPU_CTRL_FW_START);
+}
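+
+/* A minimal alternative sketch (an assumption, not part of the original
+ * driver) of the word-load loop above using the kernel's little-endian
+ * helper, shown here only to document the byte order:
+ *
+ *   #include <linux/unaligned.h>
+ *
+ *   for (i = 0; i < size; i += 2) {
+ *           ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ *                               VEND1_GLB_REG_MDIO_INDIRECT_LOAD,
+ *                               get_unaligned_le16(data + i));
+ *           if (ret)
+ *                   return ret;
+ *   }
+ */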
+
+static int aeon_firmware_load(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw;
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(dev->of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ret;
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret) {
+ phydev_err(phydev, "failed to find FW file %s (%d)\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = aeon_firmware_boot(phydev, fw->data, fw->size);
+
+ release_firmware(fw);
+
+ return ret;
+}
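+
+/* The firmware name is taken from the PHY's DT node. A hypothetical
+ * node (the file name below is only an example) would look like:
+ *
+ *   ethernet-phy@1 {
+ *           compatible = "ethernet-phy-ieee802.3-c45";
+ *           reg = <1>;
+ *           firmware-name = "as21xxx-fw.bin";
+ *   };
+ */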
+
+static bool aeon_ipc_ready(u16 val, bool parity_status)
+{
+ u16 status;
+
+ if (FIELD_GET(AEON_IPC_STS_PARITY, val) != parity_status)
+ return false;
+
+ status = val & AEON_IPC_STS_STATUS;
+
+ return status != AEON_IPC_STS_STATUS_RCVD &&
+ status != AEON_IPC_STS_STATUS_PROCESS &&
+ status != AEON_IPC_STS_STATUS_BUSY;
+}
+
+static int aeon_ipc_wait_cmd(struct phy_device *phydev, bool parity_status)
+{
+ u16 val;
+
+	/* Exit condition logic:
+	 * - Wait for the parity bit to match
+	 * - Wait for status success, error OR ready
+	 */
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, VEND1_IPC_STS, val,
+ aeon_ipc_ready(val, parity_status),
+ AEON_IPC_DELAY, AEON_IPC_TIMEOUT, false);
+}
+
+static int aeon_ipc_send_cmd(struct phy_device *phydev,
+ struct as21xxx_priv *priv,
+ u16 cmd, u16 *ret_sts)
+{
+ bool curr_parity;
+ int ret;
+
+	/* The IPC syncs by using a single parity bit.
+	 * Each CMD alternately has this bit set or cleared
+	 * so that correct flow and packet order can be verified.
+	 */
+ curr_parity = priv->parity_status;
+ if (priv->parity_status)
+ cmd |= AEON_IPC_CMD_PARITY;
+
+ /* Always update parity for next packet */
+ priv->parity_status = !priv->parity_status;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_CMD, cmd);
+ if (ret)
+ return ret;
+
+ /* Wait for packet to be processed */
+ usleep_range(AEON_IPC_DELAY, AEON_IPC_DELAY + 5000);
+
+	/* Without ret_sts, skip waiting for packet completion
+	 * (IPC parity bit sync)
+	 */
+ if (!ret_sts)
+ return 0;
+
+ ret = aeon_ipc_wait_cmd(phydev, curr_parity);
+ if (ret)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_STS);
+ if (ret < 0)
+ return ret;
+
+ *ret_sts = ret;
+ if ((*ret_sts & AEON_IPC_STS_STATUS) != AEON_IPC_STS_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
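+
+/* Illustrative parity flow (assuming driver and firmware start in
+ * sync; not an excerpt from the datasheet):
+ *
+ *   cmd #1 sent with parity 0 -> IPC_STS reports parity 0 when done
+ *   cmd #2 sent with parity 1 -> IPC_STS reports parity 1 when done
+ *   cmd #3 sent with parity 0 -> ...
+ *
+ * aeon_ipc_wait_cmd() polls until the status parity matches the parity
+ * the command was sent with, so a stale status word is never mistaken
+ * for the reply to the current command.
+ */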
+
+/* If data is NULL, return 0 or a negative error.
+ * If data is not NULL, return the number of bytes received from the
+ * IPC or a negative error.
+ */
+static int aeon_ipc_send_msg(struct phy_device *phydev,
+ u16 opcode, u16 *data, unsigned int data_len,
+ u16 *ret_data)
+{
+ struct as21xxx_priv *priv = phydev->priv;
+ unsigned int ret_size;
+ u16 cmd, ret_sts;
+ int ret;
+ int i;
+
+	/* The IPC has a maximum of 8 registers to transfer data;
+	 * make sure we never exceed this.
+	 */
+ if (data_len > AEON_IPC_DATA_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < data_len / sizeof(u16); i++)
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_DATA(i),
+ data[i]);
+
+ cmd = FIELD_PREP(AEON_IPC_CMD_SIZE, data_len) |
+ FIELD_PREP(AEON_IPC_CMD_OPCODE, opcode);
+
+ mutex_lock(&priv->ipc_lock);
+
+ ret = aeon_ipc_send_cmd(phydev, priv, cmd, &ret_sts);
+ if (ret) {
+ phydev_err(phydev, "failed to send ipc msg for %x: %d\n",
+ opcode, ret);
+ goto out;
+ }
+
+ if (!data)
+ goto out;
+
+ if ((ret_sts & AEON_IPC_STS_STATUS) == AEON_IPC_STS_STATUS_ERROR) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* Prevent the IPC from smashing the kernel stack.
+	 * We can't trust the IPC to return a sane size, and we only
+	 * preallocate space for 16 bytes.
+	 */
+ ret_size = FIELD_GET(AEON_IPC_STS_SIZE, ret_sts);
+ if (ret_size > AEON_IPC_DATA_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* Read back ret_size bytes, as reported by the IPC, from the
+	 * IPC data registers
+	 */
+ for (i = 0; i < DIV_ROUND_UP(ret_size, sizeof(u16)); i++) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_IPC_DATA(i));
+ if (ret < 0)
+ goto out;
+
+ ret_data[i] = ret;
+ }
+
+ ret = ret_size;
+
+out:
+ mutex_unlock(&priv->ipc_lock);
+
+ return ret;
+}
+
+static int aeon_ipc_noop(struct phy_device *phydev,
+ struct as21xxx_priv *priv, u16 *ret_sts)
+{
+ u16 cmd;
+
+ cmd = FIELD_PREP(AEON_IPC_CMD_SIZE, 0) |
+ FIELD_PREP(AEON_IPC_CMD_OPCODE, IPC_CMD_NOOP);
+
+ return aeon_ipc_send_cmd(phydev, priv, cmd, ret_sts);
+}
+
+/* Logic to sync the parity bit with the IPC.
+ * We send two NOP commands with the same parity and wait for the IPC
+ * to handle the packet only for the second one. This way
+ * we make sure we are in sync for every following command.
+ */
+static int aeon_ipc_sync_parity(struct phy_device *phydev,
+ struct as21xxx_priv *priv)
+{
+ u16 ret_sts;
+ int ret;
+
+ mutex_lock(&priv->ipc_lock);
+
+ /* Send NOP with no parity */
+ aeon_ipc_noop(phydev, priv, NULL);
+
+ /* Reset packet parity */
+ priv->parity_status = false;
+
+ /* Send second NOP with no parity */
+ ret = aeon_ipc_noop(phydev, priv, &ret_sts);
+
+ mutex_unlock(&priv->ipc_lock);
+
+	/* The second NOOP is expected to fail with -EINVAL */
+ if (ret != -EINVAL)
+ return ret;
+
+ if ((ret_sts & AEON_IPC_STS_STATUS) != AEON_IPC_STS_STATUS_READY) {
+ phydev_err(phydev, "Invalid IPC status on sync parity: %x\n",
+ ret_sts);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aeon_ipc_get_fw_version(struct phy_device *phydev)
+{
+ u16 ret_data[AEON_IPC_DATA_NUM_REGISTERS], data[1];
+ char fw_version[AEON_IPC_DATA_MAX + 1];
+ int ret;
+
+ data[0] = IPC_INFO_VERSION;
+
+ ret = aeon_ipc_send_msg(phydev, IPC_CMD_INFO, data,
+ sizeof(data), ret_data);
+ if (ret < 0)
+ return ret;
+
+ /* Make sure FW version is NULL terminated */
+ memcpy(fw_version, ret_data, ret);
+ fw_version[ret] = '\0';
+
+ phydev_info(phydev, "Firmware Version: %s\n", fw_version);
+
+ return 0;
+}
+
+static int aeon_dpc_ra_enable(struct phy_device *phydev)
+{
+ u16 data[2];
+
+ data[0] = IPC_CFG_PARAM_DIRECT;
+ data[1] = IPC_CFG_PARAM_DIRECT_DPC_RA;
+
+ return aeon_ipc_send_msg(phydev, IPC_CMD_CFG_PARAM, data,
+ sizeof(data), NULL);
+}
+
+static int as21xxx_probe(struct phy_device *phydev)
+{
+ struct as21xxx_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev,
+ sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ ret = devm_mutex_init(&phydev->mdio.dev,
+ &priv->ipc_lock);
+ if (ret)
+ return ret;
+
+ ret = aeon_ipc_sync_parity(phydev, priv);
+ if (ret)
+ return ret;
+
+ ret = aeon_ipc_get_fw_version(phydev);
+ if (ret)
+ return ret;
+
+	/* Enable the PTP clk if not already enabled */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK,
+ VEND1_PTP_CLK_EN);
+ if (ret)
+ return ret;
+
+ return aeon_dpc_ra_enable(phydev);
+}
+
+static int as21xxx_read_link(struct phy_device *phydev, int *bmcr)
+{
+ int status;
+
+	/* The normal C22 BMCR reports inconsistent data; use
+	 * the C22 registers mapped into C45 to get more consistent
+	 * link info.
+	 */
+ *bmcr = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_BMCR);
+ if (*bmcr < 0)
+ return *bmcr;
+
+ /* Autoneg is being started, therefore disregard current
+ * link status and report link as down.
+ */
+ if (*bmcr & BMCR_ANRESTART) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ status = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (status < 0)
+ return status;
+
+ phydev->link = !!(status & MDIO_STAT1_LSTATUS);
+
+ return 0;
+}
+
+static int as21xxx_read_c22_lpa(struct phy_device *phydev)
+{
+ int lpagb;
+
+	/* MII_STAT1000 is only filled in the C22 registers mapped
+	 * into C45; use that to fill the lpagb values and check them.
+	 */
+ lpagb = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_STAT1000);
+ if (lpagb < 0)
+ return lpagb;
+
+ if (lpagb & LPA_1000MSFAIL) {
+ int adv = phy_read_mmd(phydev, MDIO_MMD_AN,
+ AS21XXX_MDIO_AN_C22 + MII_CTRL1000);
+
+ if (adv < 0)
+ return adv;
+
+ if (adv & CTL1000_ENABLE_MASTER)
+ phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n");
+ else
+ phydev_err(phydev, "Master/Slave resolution failed\n");
+ return -ENOLINK;
+ }
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpagb);
+
+ return 0;
+}
+
+static int as21xxx_read_status(struct phy_device *phydev)
+{
+ int bmcr, old_link = phydev->link;
+ int ret;
+
+ ret = as21xxx_read_link(phydev, &bmcr);
+ if (ret)
+ return ret;
+
+	/* Why bother the PHY if nothing can have changed? */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret)
+ return ret;
+
+ ret = as21xxx_read_c22_lpa(phydev);
+ if (ret)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+ } else {
+ int speed;
+
+ linkmode_zero(phydev->lp_advertising);
+
+ speed = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SPEED_STATUS);
+ if (speed < 0)
+ return speed;
+
+ switch (speed & VEND1_SPEED_STATUS) {
+ case VEND1_SPEED_10000:
+ phydev->speed = SPEED_10000;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_5000:
+ phydev->speed = SPEED_5000;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ break;
+ case VEND1_SPEED_100:
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ case VEND1_SPEED_10:
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_FULL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int as21xxx_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 val = VEND1_LED_REG_A_EVENT_OFF;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ if (value)
+ val = VEND1_LED_REG_A_EVENT_ON;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_LED_REG(index),
+ VEND1_LED_REG_A_EVENT,
+ FIELD_PREP(VEND1_LED_REG_A_EVENT, val));
+}
+
+static int as21xxx_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int i;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (rules == as21xxx_led_supported_pattern[i].pattern)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
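+
+/* Example of requesting a supported pattern from userspace through the
+ * netdev LED trigger (sysfs paths depend on the LED name):
+ *
+ *   echo netdev > /sys/class/leds/<led>/trigger
+ *   echo eth0 > /sys/class/leds/<led>/device_name
+ *   echo 1 > /sys/class/leds/<led>/link_1000
+ *
+ * The resulting rules bitmask is then validated against
+ * as21xxx_led_supported_pattern[] by the function above.
+ */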
+
+static int as21xxx_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int i, val;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_LED_REG(index));
+ if (val < 0)
+ return val;
+
+ val = FIELD_GET(VEND1_LED_REG_A_EVENT, val);
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (val == as21xxx_led_supported_pattern[i].val) {
+ *rules = as21xxx_led_supported_pattern[i].pattern;
+ return 0;
+ }
+
+ /* Should be impossible */
+ return -EINVAL;
+}
+
+static int as21xxx_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+ int i;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(as21xxx_led_supported_pattern); i++)
+ if (rules == as21xxx_led_supported_pattern[i].pattern) {
+ val = as21xxx_led_supported_pattern[i].val;
+ break;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_LED_REG(index),
+ VEND1_LED_REG_A_EVENT,
+ FIELD_PREP(VEND1_LED_REG_A_EVENT, val));
+}
+
+static int as21xxx_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ bool led_active_low = false;
+ u16 mask, val = 0;
+ u32 mode;
+
+ if (index > AEON_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ led_active_low = true;
+ break;
+ case PHY_LED_ACTIVE_HIGH: /* default mode */
+ led_active_low = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ mask = VEND1_GLB_CPU_CTRL_LED_POLARITY(index);
+ if (led_active_low)
+ val = VEND1_GLB_CPU_CTRL_LED_POLARITY(index);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLB_REG_CPU_CTRL,
+ mask, val);
+}
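+
+/* The polarity modes normally come from DT. A hypothetical LED node
+ * using the generic PHY LED binding would be:
+ *
+ *   leds {
+ *           #address-cells = <1>;
+ *           #size-cells = <0>;
+ *
+ *           led@0 {
+ *                   reg = <0>;
+ *                   active-low;
+ *           };
+ *   };
+ */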
+
+static int as21xxx_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
+{
+ struct as21xxx_priv *priv;
+ u16 ret_sts;
+ u32 phy_id;
+ int ret;
+
+	/* Skip PHYs that are not AS21xxx or that already have firmware loaded */
+ if (phydev->c45_ids.device_ids[MDIO_MMD_PCS] != PHY_ID_AS21XXX)
+ return genphy_match_phy_device(phydev, phydrv);
+
+ /* Read PHY ID to handle firmware just loaded */
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID1);
+ if (ret < 0)
+ return ret;
+ phy_id = ret << 16;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID2);
+ if (ret < 0)
+ return ret;
+ phy_id |= ret;
+
+	/* If the PHY ID is not the generic AS21xxx one, assume
+	 * the firmware has just been loaded
+	 */
+ if (phy_id != PHY_ID_AS21XXX)
+ return phy_id == phydrv->phy_id;
+
+ /* Allocate temp priv and load the firmware */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->ipc_lock);
+
+ ret = aeon_firmware_load(phydev);
+ if (ret)
+ goto out;
+
+ /* Sync parity... */
+ ret = aeon_ipc_sync_parity(phydev, priv);
+ if (ret)
+ goto out;
+
+	/* ...and send a third NOOP cmd to wait for the firmware to finish loading */
+ ret = aeon_ipc_noop(phydev, priv, &ret_sts);
+ if (ret)
+ goto out;
+
+out:
+ mutex_destroy(&priv->ipc_lock);
+ kfree(priv);
+
+	/* The return value can either be 0 or a negative error code.
+	 * Returning 0 here means THIS is NOT a suitable PHY.
+	 *
+	 * For the specific case of the generic Aeonsemi PHY ID that
+	 * needs the firmware to be loaded first to have a correct PHY ID,
+	 * this is OK as a matching PHY ID will be found right after.
+	 * This relies on the driver probe order where the first PHY driver
+	 * probed is the generic one.
+	 */
+ return ret;
+}
+
+static struct phy_driver as21xxx_drivers[] = {
+ {
+	/* The PHY is exposed in C45 as 0x7500 0x9410
+	 * before the firmware is loaded.
+ * This driver entry must be attempted first to load
+ * the firmware and thus update the ID registers.
+ */
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21XXX),
+ .name = "Aeonsemi AS21xxx",
+ .match_phy_device = as21xxx_match_phy_device,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21011JB1),
+ .name = "Aeonsemi AS21011JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21011PB1),
+ .name = "Aeonsemi AS21011PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21010PB1),
+ .name = "Aeonsemi AS21010PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21010JB1),
+ .name = "Aeonsemi AS21010JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21210PB1),
+ .name = "Aeonsemi AS21210PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21510JB1),
+ .name = "Aeonsemi AS21510JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21510PB1),
+ .name = "Aeonsemi AS21510PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21511JB1),
+ .name = "Aeonsemi AS21511JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21210JB1),
+ .name = "Aeonsemi AS21210JB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_AS21511PB1),
+ .name = "Aeonsemi AS21511PB1",
+ .probe = as21xxx_probe,
+ .match_phy_device = as21xxx_match_phy_device,
+ .read_status = as21xxx_read_status,
+ .led_brightness_set = as21xxx_led_brightness_set,
+ .led_hw_is_supported = as21xxx_led_hw_is_supported,
+ .led_hw_control_set = as21xxx_led_hw_control_set,
+ .led_hw_control_get = as21xxx_led_hw_control_get,
+ .led_polarity_set = as21xxx_led_polarity_set,
+ },
+};
+module_phy_driver(as21xxx_drivers);
+
+static struct mdio_device_id __maybe_unused as21xxx_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(PHY_VENDOR_AEONSEMI) },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, as21xxx_tbl);
+
+MODULE_DESCRIPTION("Aeonsemi AS21xxx PHY driver");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index e81404bf8994..299f9a8f30f4 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -185,14 +185,10 @@ static irqreturn_t bcm87xx_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static int bcm8706_match_phy_device(struct phy_device *phydev)
+static int bcm87xx_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
-}
-
-static int bcm8727_match_phy_device(struct phy_device *phydev)
-{
- return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
+ return phydev->c45_ids.device_ids[4] == phydrv->phy_id;
}
static struct phy_driver bcm87xx_driver[] = {
@@ -206,7 +202,7 @@ static struct phy_driver bcm87xx_driver[] = {
.read_status = bcm87xx_read_status,
.config_intr = bcm87xx_config_intr,
.handle_interrupt = bcm87xx_handle_interrupt,
- .match_phy_device = bcm8706_match_phy_device,
+ .match_phy_device = bcm87xx_match_phy_device,
}, {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
@@ -217,7 +213,7 @@ static struct phy_driver bcm87xx_driver[] = {
.read_status = bcm87xx_read_status,
.config_intr = bcm87xx_config_intr,
.handle_interrupt = bcm87xx_handle_interrupt,
- .match_phy_device = bcm8727_match_phy_device,
+ .match_phy_device = bcm87xx_match_phy_device,
} };
module_phy_driver(bcm87xx_driver);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 85e231451093..daab555721df 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -478,13 +478,6 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
- /* Reject requests with unsupported flags */
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Reject requests to enable time stamping on both edges. */
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
@@ -513,9 +506,6 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
return 0;
case PTP_CLK_REQ_PEROUT:
- /* Reject requests with unsupported flags */
- if (rq->perout.flags)
- return -EOPNOTSUPP;
if (rq->perout.index >= N_PER_OUT)
return -EINVAL;
return periodic_output(clock, rq, on, rq->perout.index);
@@ -1002,6 +992,9 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
clock->caps.n_per_out = N_PER_OUT;
clock->caps.n_pins = DP83640_N_PINS;
clock->caps.pps = 0;
+ clock->caps.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
clock->caps.adjfine = ptp_dp83640_adjfine;
clock->caps.adjtime = ptp_dp83640_adjtime;
clock->caps.gettime64 = ptp_dp83640_gettime;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index e32013eb0186..01255dada600 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -33,6 +33,7 @@
#define MII_DP83822_MLEDCR 0x25
#define MII_DP83822_LDCTRL 0x403
#define MII_DP83822_LEDCFG1 0x460
+#define MII_DP83822_IOCTRL 0x461
#define MII_DP83822_IOCTRL1 0x462
#define MII_DP83822_IOCTRL2 0x463
#define MII_DP83822_GENCFG 0x465
@@ -118,6 +119,9 @@
#define DP83822_LEDCFG1_LED1_CTRL GENMASK(11, 8)
#define DP83822_LEDCFG1_LED3_CTRL GENMASK(7, 4)
+/* IOCTRL bits */
+#define DP83822_IOCTRL_MAC_IMPEDANCE_CTRL GENMASK(4, 1)
+
/* IOCTRL1 bits */
#define DP83822_IOCTRL1_GPIO3_CTRL GENMASK(10, 8)
#define DP83822_IOCTRL1_GPIO3_CTRL_LED3 BIT(0)
@@ -202,6 +206,7 @@ struct dp83822_private {
u32 gpio2_clk_out;
bool led_pin_enable[DP83822_MAX_LED_PINS];
int tx_amplitude_100base_tx_index;
+ int mac_termination_index;
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -533,6 +538,12 @@ static int dp83822_config_init(struct phy_device *phydev)
FIELD_PREP(DP83822_100BASE_TX_LINE_DRIVER_SWING,
dp83822->tx_amplitude_100base_tx_index));
+ if (dp83822->mac_termination_index >= 0)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL,
+ DP83822_IOCTRL_MAC_IMPEDANCE_CTRL,
+ FIELD_PREP(DP83822_IOCTRL_MAC_IMPEDANCE_CTRL,
+ dp83822->mac_termination_index));
+
err = dp83822_config_init_leds(phydev);
if (err)
return err;
@@ -736,6 +747,10 @@ static const u32 tx_amplitude_100base_tx_gain[] = {
93, 95, 97, 98, 100, 102, 103, 105,
};
+static const u32 mac_termination[] = {
+ 99, 91, 84, 78, 73, 69, 65, 61, 58, 55, 53, 50, 48, 46, 44, 43,
+};
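+
+/* The index into this table is selected by the mac-termination-ohms DT
+ * property read via phy_get_mac_termination() in dp83822_of_init();
+ * e.g. a hypothetical
+ *
+ *   mac-termination-ohms = <50>;
+ *
+ * selects index 11 (50 ohms).
+ */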
+
static int dp83822_of_init_leds(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -852,6 +867,23 @@ static int dp83822_of_init(struct phy_device *phydev)
}
}
+ ret = phy_get_mac_termination(phydev, dev, &val);
+ if (!ret) {
+ for (i = 0; i < ARRAY_SIZE(mac_termination); i++) {
+ if (mac_termination[i] == val) {
+ dp83822->mac_termination_index = i;
+ break;
+ }
+ }
+
+ if (dp83822->mac_termination_index < 0) {
+ phydev_err(phydev,
+ "Invalid value for mac-termination-ohms property (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
return dp83822_of_init_leds(phydev);
}
@@ -931,6 +963,7 @@ static int dp8382x_probe(struct phy_device *phydev)
return -ENOMEM;
dp83822->tx_amplitude_100base_tx_index = -1;
+ dp83822->mac_termination_index = -1;
phydev->priv = dp83822;
return 0;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 063266cafe9c..deeefb962566 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -92,11 +92,6 @@
#define DP83867_STRAP_STS1_RESERVED BIT(11)
/* STRAP_STS2 bits */
-#define DP83867_STRAP_STS2_CLK_SKEW_TX_MASK GENMASK(6, 4)
-#define DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT 4
-#define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
-#define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
-#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
#define DP83867_STRAP_STS2_STRAP_FLD BIT(10)
/* PHY CTRL bits */
@@ -111,10 +106,8 @@
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
-#define DP83867_RGMII_TX_CLK_DELAY_INV (DP83867_RGMII_TX_CLK_DELAY_MAX + 1)
#define DP83867_RGMII_RX_CLK_DELAY_MAX 0xf
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
-#define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1)
/* IO_MUX_CFG bits */
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
@@ -506,48 +499,6 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev)
return 0;
}
-static int dp83867_verify_rgmii_cfg(struct phy_device *phydev)
-{
- struct dp83867_private *dp83867 = phydev->priv;
-
- /* Existing behavior was to use default pin strapping delay in rgmii
- * mode, but rgmii should have meant no delay. Warn existing users.
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
- const u16 val = phy_read_mmd(phydev, DP83867_DEVADDR,
- DP83867_STRAP_STS2);
- const u16 txskew = (val & DP83867_STRAP_STS2_CLK_SKEW_TX_MASK) >>
- DP83867_STRAP_STS2_CLK_SKEW_TX_SHIFT;
- const u16 rxskew = (val & DP83867_STRAP_STS2_CLK_SKEW_RX_MASK) >>
- DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT;
-
- if (txskew != DP83867_STRAP_STS2_CLK_SKEW_NONE ||
- rxskew != DP83867_STRAP_STS2_CLK_SKEW_NONE)
- phydev_warn(phydev,
- "PHY has delays via pin strapping, but phy-mode = 'rgmii'\n"
- "Should be 'rgmii-id' to use internal delays txskew:%x rxskew:%x\n",
- txskew, rxskew);
- }
-
- /* RX delay *must* be specified if internal delay of RX is used. */
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) &&
- dp83867->rx_id_delay == DP83867_RGMII_RX_CLK_DELAY_INV) {
- phydev_err(phydev, "ti,rx-internal-delay must be specified\n");
- return -EINVAL;
- }
-
- /* TX delay *must* be specified if internal delay of TX is used. */
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) &&
- dp83867->tx_id_delay == DP83867_RGMII_TX_CLK_DELAY_INV) {
- phydev_err(phydev, "ti,tx-internal-delay must be specified\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
#if IS_ENABLED(CONFIG_OF_MDIO)
static int dp83867_of_init_io_impedance(struct phy_device *phydev)
{
@@ -631,7 +582,7 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node,
"ti,sgmii-ref-clock-output-enable");
- dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV;
+ dp83867->rx_id_delay = DP83867_RGMIIDCTL_2_00_NS;
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
if (!ret && dp83867->rx_id_delay > DP83867_RGMII_RX_CLK_DELAY_MAX) {
@@ -641,7 +592,7 @@ static int dp83867_of_init(struct phy_device *phydev)
return -EINVAL;
}
- dp83867->tx_id_delay = DP83867_RGMII_TX_CLK_DELAY_INV;
+ dp83867->tx_id_delay = DP83867_RGMIIDCTL_2_00_NS;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
if (!ret && dp83867->tx_id_delay > DP83867_RGMII_TX_CLK_DELAY_MAX) {
@@ -761,7 +712,6 @@ static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;
int ret, val, bs;
- u16 delay;
/* Force speed optimization for the PHY even if it strapped */
ret = phy_modify(phydev, DP83867_CFG2, DP83867_DOWNSHIFT_EN,
@@ -769,10 +719,6 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = dp83867_verify_rgmii_cfg(phydev);
- if (ret)
- return ret;
-
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
if (dp83867->rxctrl_strap_quirk)
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
@@ -836,13 +782,7 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* If rgmii mode with no internal delay is selected, we do NOT use
- * aligned mode as one might expect. Instead we use the PHY's default
- * based on pin strapping. And the "mode 0" default is to *use*
- * internal delay with a value of 7 (2.00 ns).
- *
- * Set up RGMII delays
- */
+ /* Set up RGMII delays */
val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
val &= ~(DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
@@ -857,15 +797,9 @@ static int dp83867_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
- delay = 0;
- if (dp83867->rx_id_delay != DP83867_RGMII_RX_CLK_DELAY_INV)
- delay |= dp83867->rx_id_delay;
- if (dp83867->tx_id_delay != DP83867_RGMII_TX_CLK_DELAY_INV)
- delay |= dp83867->tx_id_delay <<
- DP83867_RGMII_TX_CLK_DELAY_SHIFT;
-
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
- delay);
+ dp83867->rx_id_delay |
+ (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
}
/* If specified, set io impedance */
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index ee7831a9849b..033656d574b8 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -131,7 +131,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status,
+ const struct fixed_phy_status *status,
struct gpio_desc *gpiod)
{
int ret;
@@ -160,10 +160,9 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
return 0;
}
-int fixed_phy_add(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status)
+int fixed_phy_add(int phy_addr, const struct fixed_phy_status *status)
{
- return fixed_phy_add_gpiod(irq, phy_addr, status, NULL);
+ return fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, NULL);
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
@@ -223,12 +222,11 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
}
#endif
-static struct phy_device *__fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np,
- struct gpio_desc *gpiod)
+struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct gpio_desc *gpiod;
struct phy_device *phy;
int phy_addr;
int ret;
@@ -237,18 +235,16 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
return ERR_PTR(-EPROBE_DEFER);
/* Check if we have a GPIO associated with this fixed phy */
- if (!gpiod) {
- gpiod = fixed_phy_get_gpiod(np);
- if (IS_ERR(gpiod))
- return ERR_CAST(gpiod);
- }
+ gpiod = fixed_phy_get_gpiod(np);
+ if (IS_ERR(gpiod))
+ return ERR_CAST(gpiod);
/* Get the next available PHY address, up to PHY_MAX_ADDR */
phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
- ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod);
+ ret = fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, gpiod);
if (ret < 0) {
ida_free(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
@@ -306,24 +302,8 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
return phy;
}
-
-struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np)
-{
- return __fixed_phy_register(irq, status, np, NULL);
-}
EXPORT_SYMBOL_GPL(fixed_phy_register);
-struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
-{
- return __fixed_phy_register(irq, status, NULL, gpiod);
-}
-EXPORT_SYMBOL_GPL(fixed_phy_register_with_gpiod);
-
void fixed_phy_unregister(struct phy_device *phy)
{
phy_device_remove(phy);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index bbcc7d2b54cd..c0c4f19cfb6a 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -520,12 +520,14 @@ static int ip101a_g_match_phy_device(struct phy_device *phydev, bool ip101a)
return ip101a == !ret;
}
-static int ip101a_match_phy_device(struct phy_device *phydev)
+static int ip101a_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ip101a_g_match_phy_device(phydev, true);
}
-static int ip101g_match_phy_device(struct phy_device *phydev)
+static int ip101g_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ip101a_g_match_phy_device(phydev, false);
}
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 23e1f0521f54..f3d83b04c953 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -119,7 +119,6 @@
#define MV88Q2XXX_LED_INDEX_GPIO 1
struct mv88q2xxx_priv {
- bool enable_temp;
bool enable_led0;
};
@@ -482,49 +481,6 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
return phydev->drv->soft_reset(phydev);
}
-static int mv88q2xxx_config_init(struct phy_device *phydev)
-{
- struct mv88q2xxx_priv *priv = phydev->priv;
- int ret;
-
- /* The 88Q2XXX PHYs do have the extended ability register available, but
- * register MDIO_PMA_EXTABLE where they should signalize it does not
- * work according to specification. Therefore, we force it here.
- */
- phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
-
- /* Configure interrupt with default settings, output is driven low for
- * active interrupt and high for inactive.
- */
- if (phy_interrupt_is_valid(phydev)) {
- ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
- if (ret < 0)
- return ret;
- }
-
- /* Enable LED function and disable TX disable feature on LED/TX_ENABLE */
- if (priv->enable_led0) {
- ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_RESET_CTRL,
- MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE);
- if (ret < 0)
- return ret;
- }
-
- /* Enable temperature sense */
- if (priv->enable_temp) {
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
static int mv88q2xxx_get_sqi(struct phy_device *phydev)
{
int ret;
@@ -667,6 +623,12 @@ static int mv88q2xxx_resume(struct phy_device *phydev)
}
#if IS_ENABLED(CONFIG_HWMON)
+static int mv88q2xxx_enable_temp_sense(struct phy_device *phydev)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+}
+
static const struct hwmon_channel_info * const mv88q2xxx_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_ALARM),
NULL
@@ -762,11 +724,13 @@ static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
{
- struct mv88q2xxx_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
struct device *hwmon;
+ int ret;
- priv->enable_temp = true;
+ ret = mv88q2xxx_enable_temp_sense(phydev);
+ if (ret < 0)
+ return ret;
hwmon = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&mv88q2xxx_hwmon_chip_info,
@@ -776,6 +740,11 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
}
#else
+static int mv88q2xxx_enable_temp_sense(struct phy_device *phydev)
+{
+ return 0;
+}
+
static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
{
return 0;
@@ -828,6 +797,7 @@ static int mv88q2xxx_leds_probe(struct phy_device *phydev)
static int mv88q2xxx_probe(struct phy_device *phydev)
{
struct mv88q2xxx_priv *priv;
+ int ret;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -835,22 +805,53 @@ static int mv88q2xxx_probe(struct phy_device *phydev)
phydev->priv = priv;
- return 0;
+ ret = mv88q2xxx_leds_probe(phydev);
+ if (ret)
+ return ret;
+
+ return mv88q2xxx_hwmon_probe(phydev);
}
-static int mv88q222x_probe(struct phy_device *phydev)
+static int mv88q2xxx_config_init(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv = phydev->priv;
int ret;
- ret = mv88q2xxx_probe(phydev);
- if (ret)
- return ret;
+	/* The 88Q2XXX PHYs do have the extended ability register available, but
+	 * the MDIO_PMA_EXTABLE register, where they should signal it, does not
+	 * work according to the specification. Therefore, we force it here.
+ */
+ phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
- ret = mv88q2xxx_leds_probe(phydev);
- if (ret)
+ /* Configure interrupt with default settings, output is driven low for
+ * active interrupt and high for inactive.
+ */
+ if (phy_interrupt_is_valid(phydev)) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable LED function and disable TX disable feature on LED/TX_ENABLE */
+ if (priv->enable_led0) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RESET_CTRL,
+ MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable temperature sense again. There might have been a hard reset
+ * of the PHY and in this case the register content is restored to
+ * defaults and we need to enable it again.
+ */
+ ret = mv88q2xxx_enable_temp_sense(phydev);
+ if (ret < 0)
return ret;
- return mv88q2xxx_hwmon_probe(phydev);
+ return 0;
}
static int mv88q2110_config_init(struct phy_device *phydev)
@@ -1118,7 +1119,7 @@ static struct phy_driver mv88q2xxx_driver[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2220",
.flags = PHY_POLL_CABLE_TEST,
- .probe = mv88q222x_probe,
+ .probe = mv88q2xxx_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.aneg_done = genphy_c45_aneg_done,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 5354c8895163..13e81dff42c1 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -1264,7 +1264,8 @@ static int mv3310_get_number_of_ports(struct phy_device *phydev)
return ret + 1;
}
-static int mv3310_match_phy_device(struct phy_device *phydev)
+static int mv3310_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
@@ -1273,7 +1274,8 @@ static int mv3310_match_phy_device(struct phy_device *phydev)
return mv3310_get_number_of_ports(phydev) == 1;
}
-static int mv3340_match_phy_device(struct phy_device *phydev)
+static int mv3340_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
@@ -1297,12 +1299,14 @@ static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
return !!(val & MDIO_PCS_SPEED_5G) == has_5g;
}
-static int mv2110_match_phy_device(struct phy_device *phydev)
+static int mv2110_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return mv211x_match_phy_device(phydev, true);
}
-static int mv2111_match_phy_device(struct phy_device *phydev)
+static int mv2111_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return mv211x_match_phy_device(phydev, false);
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index ede596c1a69d..a6bcb0fee863 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -8,17 +8,14 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/micrel_phy.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -27,7 +24,6 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/reset.h>
-#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -37,8 +33,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/mdio.h>
-#include "mdio-boardinfo.h"
-
static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
{
/* Deassert the optional reset signal */
@@ -137,45 +131,6 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr)
EXPORT_SYMBOL(mdiobus_is_registered_device);
/**
- * mdiobus_alloc_size - allocate a mii_bus structure
- * @size: extra amount of memory to allocate for private storage.
- * If non-zero, then bus->priv is points to that memory.
- *
- * Description: called by a bus driver to allocate an mii_bus
- * structure to fill in.
- */
-struct mii_bus *mdiobus_alloc_size(size_t size)
-{
- struct mii_bus *bus;
- size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
- size_t alloc_size;
- int i;
-
- /* If we alloc extra space, it should be aligned */
- if (size)
- alloc_size = aligned_size + size;
- else
- alloc_size = sizeof(*bus);
-
- bus = kzalloc(alloc_size, GFP_KERNEL);
- if (!bus)
- return NULL;
-
- bus->state = MDIOBUS_ALLOCATED;
- if (size)
- bus->priv = (void *)bus + aligned_size;
-
- /* Initialise the interrupts to polling and 64-bit seqcounts */
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- bus->irq[i] = PHY_POLL;
- u64_stats_init(&bus->stats[i].syncp);
- }
-
- return bus;
-}
-EXPORT_SYMBOL(mdiobus_alloc_size);
-
-/**
* mdiobus_release - mii_bus device release callback
* @d: the target struct device that contains the mii_bus
*
@@ -403,11 +358,12 @@ static const struct attribute_group *mdio_bus_groups[] = {
NULL,
};
-static struct class mdio_bus_class = {
+const struct class mdio_bus_class = {
.name = "mdio_bus",
.dev_release = mdiobus_release,
.dev_groups = mdio_bus_groups,
};
+EXPORT_SYMBOL_GPL(mdio_bus_class);
/**
* mdio_find_bus - Given the name of a mdiobus, find the mii_bus.
@@ -451,422 +407,8 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
return d ? to_mii_bus(d) : NULL;
}
EXPORT_SYMBOL(of_mdio_find_bus);
-
-/* Walk the list of subnodes of a mdio bus and look for a node that
- * matches the mdio device's address with its 'reg' property. If
- * found, set the of_node pointer for the mdio device. This allows
- * auto-probed phy devices to be supplied with information passed in
- * via DT.
- * If a PHY package is found, PHY is searched also there.
- */
-static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
- struct device_node *np)
-{
- struct device_node *child;
-
- for_each_available_child_of_node(np, child) {
- int addr;
-
- if (of_node_name_eq(child, "ethernet-phy-package")) {
- /* Validate PHY package reg presence */
- if (!of_property_present(child, "reg")) {
- of_node_put(child);
- return -EINVAL;
- }
-
- if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
- /* The refcount for the PHY package will be
- * incremented later when PHY join the Package.
- */
- of_node_put(child);
- return 0;
- }
-
- continue;
- }
-
- addr = of_mdio_parse_addr(dev, child);
- if (addr < 0)
- continue;
-
- if (addr == mdiodev->addr) {
- device_set_node(dev, of_fwnode_handle(child));
- /* The refcount on "child" is passed to the mdio
- * device. Do _not_ use of_node_put(child) here.
- */
- return 0;
- }
- }
-
- return -ENODEV;
-}
-
-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
- struct mdio_device *mdiodev)
-{
- struct device *dev = &mdiodev->dev;
-
- if (dev->of_node || !bus->dev.of_node)
- return;
-
- of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
-}
-#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
-static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
- struct mdio_device *mdiodev)
-{
-}
#endif
-/**
- * mdiobus_create_device - create a full MDIO device given
- * a mdio_board_info structure
- * @bus: MDIO bus to create the devices on
- * @bi: mdio_board_info structure describing the devices
- *
- * Returns 0 on success or < 0 on error.
- */
-static int mdiobus_create_device(struct mii_bus *bus,
- struct mdio_board_info *bi)
-{
- struct mdio_device *mdiodev;
- int ret = 0;
-
- mdiodev = mdio_device_create(bus, bi->mdio_addr);
- if (IS_ERR(mdiodev))
- return -ENODEV;
-
- strscpy(mdiodev->modalias, bi->modalias,
- sizeof(mdiodev->modalias));
- mdiodev->bus_match = mdio_device_bus_match;
- mdiodev->dev.platform_data = (void *)bi->platform_data;
-
- ret = mdio_device_register(mdiodev);
- if (ret)
- mdio_device_free(mdiodev);
-
- return ret;
-}
-
-static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
-{
- struct phy_device *phydev = ERR_PTR(-ENODEV);
- struct fwnode_handle *fwnode;
- char node_name[16];
- int err;
-
- phydev = get_phy_device(bus, addr, c45);
- if (IS_ERR(phydev))
- return phydev;
-
- /* For DT, see if the auto-probed phy has a corresponding child
- * in the bus node, and set the of_node pointer in this case.
- */
- of_mdiobus_link_mdiodev(bus, &phydev->mdio);
-
- /* Search for a swnode for the phy in the swnode hierarchy of the bus.
- * If there is no swnode for the phy provided, just ignore it.
- */
- if (dev_fwnode(&bus->dev) && !dev_fwnode(&phydev->mdio.dev)) {
- snprintf(node_name, sizeof(node_name), "ethernet-phy@%d",
- addr);
- fwnode = fwnode_get_named_child_node(dev_fwnode(&bus->dev),
- node_name);
- if (fwnode)
- device_set_node(&phydev->mdio.dev, fwnode);
- }
-
- err = phy_device_register(phydev);
- if (err) {
- phy_device_free(phydev);
- return ERR_PTR(-ENODEV);
- }
-
- return phydev;
-}
-
-/**
- * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices.
- * @bus: mii_bus to scan
- * @addr: address on bus to scan
- *
- * This function scans one address on the MDIO bus, looking for
- * devices which can be identified using a vendor/product ID in
- * registers 2 and 3. Not all MDIO devices have such registers, but
- * PHY devices typically do. Hence this function assumes anything
- * found is a PHY, or can be treated as a PHY. Other MDIO devices,
- * such as switches, will probably not be found during the scan.
- */
-struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr)
-{
- return mdiobus_scan(bus, addr, false);
-}
-EXPORT_SYMBOL(mdiobus_scan_c22);
-
-/**
- * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices.
- * @bus: mii_bus to scan
- * @addr: address on bus to scan
- *
- * This function scans one address on the MDIO bus, looking for
- * devices which can be identified using a vendor/product ID in
- * registers 2 and 3. Not all MDIO devices have such registers, but
- * PHY devices typically do. Hence this function assumes anything
- * found is a PHY, or can be treated as a PHY. Other MDIO devices,
- * such as switches, will probably not be found during the scan.
- */
-static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr)
-{
- return mdiobus_scan(bus, addr, true);
-}
-
-static int mdiobus_scan_bus_c22(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & BIT(i)) == 0) {
- struct phy_device *phydev;
-
- phydev = mdiobus_scan_c22(bus, i);
- if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
- return PTR_ERR(phydev);
- }
- }
- return 0;
-}
-
-static int mdiobus_scan_bus_c45(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & BIT(i)) == 0) {
- struct phy_device *phydev;
-
- /* Don't scan C45 if we already have a C22 device */
- if (bus->mdio_map[i])
- continue;
-
- phydev = mdiobus_scan_c45(bus, i);
- if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
- return PTR_ERR(phydev);
- }
- }
- return 0;
-}
-
-/* There are some C22 PHYs which do bad things when where is a C45
- * transaction on the bus, like accepting a read themselves, and
- * stomping over the true devices reply, to performing a write to
- * themselves which was intended for another device. Now that C22
- * devices have been found, see if any of them are bad for C45, and if we
- * should skip the C45 scan.
- */
-static bool mdiobus_prevent_c45_scan(struct mii_bus *bus)
-{
- int i;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- struct phy_device *phydev;
- u32 oui;
-
- phydev = mdiobus_get_phy(bus, i);
- if (!phydev)
- continue;
- oui = phydev->phy_id >> 10;
-
- if (oui == MICREL_OUI)
- return true;
- }
- return false;
-}
-
-/**
- * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
- * @bus: target mii_bus
- * @owner: module containing bus accessor functions
- *
- * Description: Called by a bus driver to bring up all the PHYs
- * on a given bus, and attach them to the bus. Drivers should use
- * mdiobus_register() rather than __mdiobus_register() unless they
- * need to pass a specific owner module. MDIO devices which are not
- * PHYs will not be brought up by this function. They are expected
- * to be explicitly listed in DT and instantiated by of_mdiobus_register().
- *
- * Returns 0 on success or < 0 on error.
- */
-int __mdiobus_register(struct mii_bus *bus, struct module *owner)
-{
- struct mdio_device *mdiodev;
- struct gpio_desc *gpiod;
- bool prevent_c45_scan;
- int i, err;
-
- if (!bus || !bus->name)
- return -EINVAL;
-
- /* An access method always needs both read and write operations */
- if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45)
- return -EINVAL;
-
- /* At least one method is mandatory */
- if (!bus->read && !bus->read_c45)
- return -EINVAL;
-
- if (bus->parent && bus->parent->of_node)
- bus->parent->of_node->fwnode.flags |=
- FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
-
- WARN(bus->state != MDIOBUS_ALLOCATED &&
- bus->state != MDIOBUS_UNREGISTERED,
- "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id);
-
- bus->owner = owner;
- bus->dev.parent = bus->parent;
- bus->dev.class = &mdio_bus_class;
- bus->dev.groups = NULL;
- dev_set_name(&bus->dev, "%s", bus->id);
-
- /* If the bus state is allocated, we're registering a fresh bus
- * that may have a fwnode associated with it. Grab a reference
- * to the fwnode. This will be dropped when the bus is released.
- * If the bus was set to unregistered, it means that the bus was
- * previously registered, and we've already grabbed a reference.
- */
- if (bus->state == MDIOBUS_ALLOCATED)
- fwnode_handle_get(dev_fwnode(&bus->dev));
-
- /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
- * the device in mdiobus_free()
- *
- * State will be updated later in this function in case of success
- */
- bus->state = MDIOBUS_UNREGISTERED;
-
- err = device_register(&bus->dev);
- if (err) {
- pr_err("mii_bus %s failed to register\n", bus->id);
- return -EINVAL;
- }
-
- mutex_init(&bus->mdio_lock);
- mutex_init(&bus->shared_lock);
-
- /* assert bus level PHY GPIO reset */
- gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpiod)) {
- err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
- "mii_bus %s couldn't get reset GPIO\n",
- bus->id);
- device_del(&bus->dev);
- return err;
- } else if (gpiod) {
- bus->reset_gpiod = gpiod;
- fsleep(bus->reset_delay_us);
- gpiod_set_value_cansleep(gpiod, 0);
- if (bus->reset_post_delay_us > 0)
- fsleep(bus->reset_post_delay_us);
- }
-
- if (bus->reset) {
- err = bus->reset(bus);
- if (err)
- goto error_reset_gpiod;
- }
-
- if (bus->read) {
- err = mdiobus_scan_bus_c22(bus);
- if (err)
- goto error;
- }
-
- prevent_c45_scan = mdiobus_prevent_c45_scan(bus);
-
- if (!prevent_c45_scan && bus->read_c45) {
- err = mdiobus_scan_bus_c45(bus);
- if (err)
- goto error;
- }
-
- mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
-
- bus->state = MDIOBUS_REGISTERED;
- dev_dbg(&bus->dev, "probed\n");
- return 0;
-
-error:
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- mdiodev = bus->mdio_map[i];
- if (!mdiodev)
- continue;
-
- mdiodev->device_remove(mdiodev);
- mdiodev->device_free(mdiodev);
- }
-error_reset_gpiod:
- /* Put PHYs in RESET to save power */
- if (bus->reset_gpiod)
- gpiod_set_value_cansleep(bus->reset_gpiod, 1);
-
- device_del(&bus->dev);
- return err;
-}
-EXPORT_SYMBOL(__mdiobus_register);
-
-void mdiobus_unregister(struct mii_bus *bus)
-{
- struct mdio_device *mdiodev;
- int i;
-
- if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
- return;
- bus->state = MDIOBUS_UNREGISTERED;
-
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- mdiodev = bus->mdio_map[i];
- if (!mdiodev)
- continue;
-
- if (mdiodev->reset_gpio)
- gpiod_put(mdiodev->reset_gpio);
-
- mdiodev->device_remove(mdiodev);
- mdiodev->device_free(mdiodev);
- }
-
- /* Put PHYs in RESET to save power */
- if (bus->reset_gpiod)
- gpiod_set_value_cansleep(bus->reset_gpiod, 1);
-
- device_del(&bus->dev);
-}
-EXPORT_SYMBOL(mdiobus_unregister);
-
-/**
- * mdiobus_free - free a struct mii_bus
- * @bus: mii_bus to free
- *
- * This function releases the reference to the underlying device
- * object in the mii_bus. If this is the last reference, the mii_bus
- * will be freed.
- */
-void mdiobus_free(struct mii_bus *bus)
-{
- /* For compatibility with error handling in drivers. */
- if (bus->state == MDIOBUS_ALLOCATED) {
- kfree(bus);
- return;
- }
-
- WARN(bus->state != MDIOBUS_UNREGISTERED,
- "%s: not in UNREGISTERED state\n", bus->id);
- bus->state = MDIOBUS_RELEASED;
-
- put_device(&bus->dev);
-}
-EXPORT_SYMBOL(mdiobus_free);
-
static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
{
preempt_disable();
@@ -1446,7 +988,7 @@ const struct bus_type mdio_bus_type = {
};
EXPORT_SYMBOL(mdio_bus_type);
-int __init mdio_bus_init(void)
+static int __init mdio_bus_init(void)
{
int ret;
@@ -1460,16 +1002,14 @@ int __init mdio_bus_init(void)
return ret;
}
-#if IS_ENABLED(CONFIG_PHYLIB)
-void mdio_bus_exit(void)
+static void __exit mdio_bus_exit(void)
{
class_unregister(&mdio_bus_class);
bus_unregister(&mdio_bus_type);
}
-EXPORT_SYMBOL_GPL(mdio_bus_exit);
-#else
-module_init(mdio_bus_init);
-/* no module_exit, intentional */
+
+subsys_initcall(mdio_bus_init);
+module_exit(mdio_bus_exit);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MDIO bus/device layer");
-#endif
diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c
new file mode 100644
index 000000000000..65850e36284d
--- /dev/null
+++ b/drivers/net/phy/mdio_bus_provider.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* MDIO Bus provider interface
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/micrel_phy.h>
+#include <linux/mii.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+
+#include "mdio-boardinfo.h"
+
+/**
+ * mdiobus_alloc_size - allocate a mii_bus structure
+ * @size: extra amount of memory to allocate for private storage.
+ * If non-zero, then bus->priv points to that memory.
+ *
+ * Description: called by a bus driver to allocate an mii_bus
+ * structure to fill in.
+ */
+struct mii_bus *mdiobus_alloc_size(size_t size)
+{
+ struct mii_bus *bus;
+ size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
+ size_t alloc_size;
+ int i;
+
+ /* If we alloc extra space, it should be aligned */
+ if (size)
+ alloc_size = aligned_size + size;
+ else
+ alloc_size = sizeof(*bus);
+
+ bus = kzalloc(alloc_size, GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ bus->state = MDIOBUS_ALLOCATED;
+ if (size)
+ bus->priv = (void *)bus + aligned_size;
+
+ /* Initialise the interrupts to polling and 64-bit seqcounts */
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ bus->irq[i] = PHY_POLL;
+ u64_stats_init(&bus->stats[i].syncp);
+ }
+
+ return bus;
+}
+EXPORT_SYMBOL(mdiobus_alloc_size);
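
For illustration, a bus driver using the private-storage variant might do the following (the struct and function names here are hypothetical, not part of this patch):

	struct my_mdio_priv {
		void __iomem *regs;		/* hypothetical MMIO base */
	};

	static struct mii_bus *my_mdio_alloc(void)
	{
		struct mii_bus *bus;

		/* One allocation: the mii_bus plus aligned private storage */
		bus = mdiobus_alloc_size(sizeof(struct my_mdio_priv));
		if (!bus)
			return NULL;

		/* bus->priv already points at the extra storage */
		return bus;
	}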
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+/* Walk the list of subnodes of an mdio bus and look for a node that
+ * matches the mdio device's address with its 'reg' property. If
+ * found, set the of_node pointer for the mdio device. This allows
+ * auto-probed phy devices to be supplied with information passed in
+ * via DT.
+ * If a PHY package node is found, the search also descends into it.
+ */
+static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
+ struct device_node *np)
+{
+ struct device_node *child;
+
+ for_each_available_child_of_node(np, child) {
+ int addr;
+
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Validate PHY package reg presence */
+ if (!of_property_present(child, "reg")) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
+ /* The refcount for the PHY package will be
+ * incremented later when the PHY joins the package.
+ */
+ of_node_put(child);
+ return 0;
+ }
+
+ continue;
+ }
+
+ addr = of_mdio_parse_addr(dev, child);
+ if (addr < 0)
+ continue;
+
+ if (addr == mdiodev->addr) {
+ device_set_node(dev, of_fwnode_handle(child));
+ /* The refcount on "child" is passed to the mdio
+ * device. Do _not_ use of_node_put(child) here.
+ */
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+ struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+
+ if (dev->of_node || !bus->dev.of_node)
+ return;
+
+ of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
+}
+#endif
+
+/**
+ * mdiobus_create_device - create a full MDIO device given
+ * a mdio_board_info structure
+ * @bus: MDIO bus to create the devices on
+ * @bi: mdio_board_info structure describing the devices
+ *
+ * Returns 0 on success or < 0 on error.
+ */
+static int mdiobus_create_device(struct mii_bus *bus,
+ struct mdio_board_info *bi)
+{
+ struct mdio_device *mdiodev;
+ int ret = 0;
+
+ mdiodev = mdio_device_create(bus, bi->mdio_addr);
+ if (IS_ERR(mdiodev))
+ return -ENODEV;
+
+ strscpy(mdiodev->modalias, bi->modalias,
+ sizeof(mdiodev->modalias));
+ mdiodev->bus_match = mdio_device_bus_match;
+ mdiodev->dev.platform_data = (void *)bi->platform_data;
+
+ ret = mdio_device_register(mdiodev);
+ if (ret)
+ mdio_device_free(mdiodev);
+
+ return ret;
+}
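
As a sketch of where such board info typically comes from, a platform might register descriptions at init time which are later instantiated through this helper (the bus and modalias names below are hypothetical):

	static const struct mdio_board_info my_mdio_board_info[] = {
		{
			.bus_id    = "my-mdio-0",	/* must match bus->id */
			.modalias  = "my-mdio-dev",	/* matched via mdio_device_bus_match() */
			.mdio_addr = 7,
		},
	};

	static int __init my_board_setup(void)
	{
		return mdiobus_register_board_info(my_mdio_board_info,
						   ARRAY_SIZE(my_mdio_board_info));
	}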
+
+static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
+{
+ struct phy_device *phydev = ERR_PTR(-ENODEV);
+ struct fwnode_handle *fwnode;
+ char node_name[16];
+ int err;
+
+ phydev = get_phy_device(bus, addr, c45);
+ if (IS_ERR(phydev))
+ return phydev;
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+ /* For DT, see if the auto-probed phy has a corresponding child
+ * in the bus node, and set the of_node pointer in this case.
+ */
+ of_mdiobus_link_mdiodev(bus, &phydev->mdio);
+#endif
+
+ /* Search for a swnode for the phy in the swnode hierarchy of the bus.
+ * If no swnode is provided for the phy, just ignore it.
+ */
+ if (dev_fwnode(&bus->dev) && !dev_fwnode(&phydev->mdio.dev)) {
+ snprintf(node_name, sizeof(node_name), "ethernet-phy@%d",
+ addr);
+ fwnode = fwnode_get_named_child_node(dev_fwnode(&bus->dev),
+ node_name);
+ if (fwnode)
+ device_set_node(&phydev->mdio.dev, fwnode);
+ }
+
+ err = phy_device_register(phydev);
+ if (err) {
+ phy_device_free(phydev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return phydev;
+}
+
+/**
+ * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, false);
+}
+EXPORT_SYMBOL(mdiobus_scan_c22);
+
+/**
+ * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, true);
+}
+
+static int mdiobus_scan_bus_c22(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan_c22(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+static int mdiobus_scan_bus_c45(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ /* Don't scan C45 if we already have a C22 device */
+ if (bus->mdio_map[i])
+ continue;
+
+ phydev = mdiobus_scan_c45(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+/* There are some C22 PHYs which do bad things when there is a C45
+ * transaction on the bus, such as accepting a read themselves and
+ * stomping over the true device's reply, or performing a write to
+ * themselves which was intended for another device. Now that the C22
+ * devices have been found, see if any of them misbehave under C45,
+ * and if so whether the C45 scan should be skipped.
+ */
+static bool mdiobus_prevent_c45_scan(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+ u32 oui;
+
+ phydev = mdiobus_get_phy(bus, i);
+ if (!phydev)
+ continue;
+ oui = phydev->phy_id >> 10;
+
+ if (oui == MICREL_OUI)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * __mdiobus_register - bring up all the PHYs on a given bus and attach them to the bus
+ * @bus: target mii_bus
+ * @owner: module containing bus accessor functions
+ *
+ * Description: Called by a bus driver to bring up all the PHYs
+ * on a given bus, and attach them to the bus. Drivers should use
+ * mdiobus_register() rather than __mdiobus_register() unless they
+ * need to pass a specific owner module. MDIO devices which are not
+ * PHYs will not be brought up by this function. They are expected
+ * to be explicitly listed in DT and instantiated by of_mdiobus_register().
+ *
+ * Returns 0 on success or < 0 on error.
+ */
+int __mdiobus_register(struct mii_bus *bus, struct module *owner)
+{
+ struct mdio_device *mdiodev;
+ struct gpio_desc *gpiod;
+ bool prevent_c45_scan;
+ int i, err;
+
+ if (!bus || !bus->name)
+ return -EINVAL;
+
+ /* An access method always needs both read and write operations */
+ if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45)
+ return -EINVAL;
+
+ /* At least one method is mandatory */
+ if (!bus->read && !bus->read_c45)
+ return -EINVAL;
+
+ if (bus->parent && bus->parent->of_node)
+ bus->parent->of_node->fwnode.flags |=
+ FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
+ WARN(bus->state != MDIOBUS_ALLOCATED &&
+ bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in ALLOCATED or UNREGISTERED state\n", bus->id);
+
+ bus->owner = owner;
+ bus->dev.parent = bus->parent;
+ bus->dev.class = &mdio_bus_class;
+ bus->dev.groups = NULL;
+ dev_set_name(&bus->dev, "%s", bus->id);
+
+ /* If the bus state is allocated, we're registering a fresh bus
+ * that may have a fwnode associated with it. Grab a reference
+ * to the fwnode. This will be dropped when the bus is released.
+ * If the bus was set to unregistered, it means that the bus was
+ * previously registered, and we've already grabbed a reference.
+ */
+ if (bus->state == MDIOBUS_ALLOCATED)
+ fwnode_handle_get(dev_fwnode(&bus->dev));
+
+ /* We need to set state to MDIOBUS_UNREGISTERED to correctly release
+ * the device in mdiobus_free()
+ *
+ * State will be updated later in this function in case of success
+ */
+ bus->state = MDIOBUS_UNREGISTERED;
+
+ err = device_register(&bus->dev);
+ if (err) {
+ pr_err("mii_bus %s failed to register\n", bus->id);
+ return -EINVAL;
+ }
+
+ mutex_init(&bus->mdio_lock);
+ mutex_init(&bus->shared_lock);
+
+ /* assert bus level PHY GPIO reset */
+ gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod)) {
+ err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
+ "mii_bus %s couldn't get reset GPIO\n",
+ bus->id);
+ device_del(&bus->dev);
+ return err;
+ } else if (gpiod) {
+ bus->reset_gpiod = gpiod;
+ fsleep(bus->reset_delay_us);
+ gpiod_set_value_cansleep(gpiod, 0);
+ if (bus->reset_post_delay_us > 0)
+ fsleep(bus->reset_post_delay_us);
+ }
+
+ if (bus->reset) {
+ err = bus->reset(bus);
+ if (err)
+ goto error_reset_gpiod;
+ }
+
+ if (bus->read) {
+ err = mdiobus_scan_bus_c22(bus);
+ if (err)
+ goto error;
+ }
+
+ prevent_c45_scan = mdiobus_prevent_c45_scan(bus);
+
+ if (!prevent_c45_scan && bus->read_c45) {
+ err = mdiobus_scan_bus_c45(bus);
+ if (err)
+ goto error;
+ }
+
+ mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
+
+ bus->state = MDIOBUS_REGISTERED;
+ dev_dbg(&bus->dev, "probed\n");
+ return 0;
+
+error:
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ mdiodev = bus->mdio_map[i];
+ if (!mdiodev)
+ continue;
+
+ mdiodev->device_remove(mdiodev);
+ mdiodev->device_free(mdiodev);
+ }
+error_reset_gpiod:
+ /* Put PHYs in RESET to save power */
+ if (bus->reset_gpiod)
+ gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+
+ device_del(&bus->dev);
+ return err;
+}
+EXPORT_SYMBOL(__mdiobus_register);
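
A minimal sketch of the usual call sequence from a bus driver follows; my_mdio_read/my_mdio_write are hypothetical accessors, and mdiobus_register() is the THIS_MODULE wrapper around __mdiobus_register():

	static int my_mdio_read(struct mii_bus *bus, int addr, int regnum);		/* hypothetical */
	static int my_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val);	/* hypothetical */

	static int my_bus_bringup(struct device *parent)
	{
		struct mii_bus *bus;
		int err;

		bus = mdiobus_alloc();
		if (!bus)
			return -ENOMEM;

		bus->name   = "my mdio bus";
		bus->read   = my_mdio_read;
		bus->write  = my_mdio_write;
		bus->parent = parent;
		snprintf(bus->id, MII_BUS_ID_SIZE, "my-mdio-%d", 0);

		err = mdiobus_register(bus);
		if (err)
			mdiobus_free(bus);	/* safe: bus is back in UNREGISTERED state */
		return err;
	}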
+
+void mdiobus_unregister(struct mii_bus *bus)
+{
+ struct mdio_device *mdiodev;
+ int i;
+
+ if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
+ return;
+ bus->state = MDIOBUS_UNREGISTERED;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ mdiodev = bus->mdio_map[i];
+ if (!mdiodev)
+ continue;
+
+ if (mdiodev->reset_gpio)
+ gpiod_put(mdiodev->reset_gpio);
+
+ mdiodev->device_remove(mdiodev);
+ mdiodev->device_free(mdiodev);
+ }
+
+ /* Put PHYs in RESET to save power */
+ if (bus->reset_gpiod)
+ gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+
+ device_del(&bus->dev);
+}
+EXPORT_SYMBOL(mdiobus_unregister);
+
+/**
+ * mdiobus_free - free a struct mii_bus
+ * @bus: mii_bus to free
+ *
+ * This function releases the reference to the underlying device
+ * object in the mii_bus. If this is the last reference, the mii_bus
+ * will be freed.
+ */
+void mdiobus_free(struct mii_bus *bus)
+{
+ /* For compatibility with error handling in drivers. */
+ if (bus->state == MDIOBUS_ALLOCATED) {
+ kfree(bus);
+ return;
+ }
+
+ WARN(bus->state != MDIOBUS_UNREGISTERED,
+ "%s: not in UNREGISTERED state\n", bus->id);
+ bus->state = MDIOBUS_RELEASED;
+
+ put_device(&bus->dev);
+}
+EXPORT_SYMBOL(mdiobus_free);
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index e747ee63c665..cce3f405d1a4 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -45,6 +45,7 @@ int mdio_device_bus_match(struct device *dev, const struct device_driver *drv)
return strcmp(mdiodev->modalias, drv->name) == 0;
}
+EXPORT_SYMBOL_GPL(mdio_device_bus_match);
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
{
diff --git a/drivers/net/phy/mediatek/Kconfig b/drivers/net/phy/mediatek/Kconfig
index 2a8ac5aed0f8..9f30a91be8dd 100644
--- a/drivers/net/phy/mediatek/Kconfig
+++ b/drivers/net/phy/mediatek/Kconfig
@@ -1,6 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
-config MTK_NET_PHYLIB
- tristate
+config MEDIATEK_2P5GE_PHY
+ tristate "MediaTek 2.5Gb Ethernet PHYs"
+ depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST
+ select MTK_NET_PHYLIB
+ help
+ Supports MediaTek SoC built-in 2.5Gb Ethernet PHYs.
+
+ This will load the necessary firmware and add the appropriate time
+ delays. The procedure is accelerated through the internal pbus
+ instead of the MDIO bus. Certain link-up issues are also fixed here.
config MEDIATEK_GE_PHY
tristate "MediaTek Gigabit Ethernet PHYs"
@@ -15,8 +23,9 @@ config MEDIATEK_GE_PHY
config MEDIATEK_GE_SOC_PHY
tristate "MediaTek SoC Ethernet PHYs"
- depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST
- depends on NVMEM_MTK_EFUSE
+ depends on ARM64 || COMPILE_TEST
+ depends on ARCH_AIROHA || (ARCH_MEDIATEK && NVMEM_MTK_EFUSE) || \
+ COMPILE_TEST
select MTK_NET_PHYLIB
help
Supports MediaTek SoC built-in Gigabit Ethernet PHYs.
@@ -25,3 +34,6 @@ config MEDIATEK_GE_SOC_PHY
the MT7981 and MT7988 SoCs. These PHYs need calibration data
present in the SoCs efuse and will dynamically calibrate VCM
(common-mode voltage) during startup.
+
+config MTK_NET_PHYLIB
+ tristate
diff --git a/drivers/net/phy/mediatek/Makefile b/drivers/net/phy/mediatek/Makefile
index 814879d0abe5..ac57ecc799fc 100644
--- a/drivers/net/phy/mediatek/Makefile
+++ b/drivers/net/phy/mediatek/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTK_NET_PHYLIB) += mtk-phy-lib.o
+obj-$(CONFIG_MEDIATEK_2P5GE_PHY) += mtk-2p5ge.o
obj-$(CONFIG_MEDIATEK_GE_PHY) += mtk-ge.o
obj-$(CONFIG_MEDIATEK_GE_SOC_PHY) += mtk-ge-soc.o
+obj-$(CONFIG_MTK_NET_PHYLIB) += mtk-phy-lib.o
diff --git a/drivers/net/phy/mediatek/mtk-2p5ge.c b/drivers/net/phy/mediatek/mtk-2p5ge.c
new file mode 100644
index 000000000000..e147eab523ef
--- /dev/null
+++ b/drivers/net/phy/mediatek/mtk-2p5ge.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/phy.h>
+
+#include "mtk.h"
+
+#define MTK_2P5GPHY_ID_MT7988 0x00339c11
+
+#define MT7988_2P5GE_PMB_FW "mediatek/mt7988/i2p5ge-phy-pmb.bin"
+#define MT7988_2P5GE_PMB_FW_SIZE 0x20000
+#define MT7988_2P5GE_PMB_FW_BASE 0x0f100000
+#define MT7988_2P5GE_PMB_FW_LEN 0x20000
+#define MTK_2P5GPHY_MCU_CSR_BASE 0x0f0f0000
+#define MTK_2P5GPHY_MCU_CSR_LEN 0x20
+#define MD32_EN_CFG 0x18
+#define MD32_EN BIT(0)
+
+#define BASE100T_STATUS_EXTEND 0x10
+#define BASE1000T_STATUS_EXTEND 0x11
+#define EXTEND_CTRL_AND_STATUS 0x16
+
+#define PHY_AUX_CTRL_STATUS 0x1d
+#define PHY_AUX_DPX_MASK GENMASK(5, 5)
+#define PHY_AUX_SPEED_MASK GENMASK(4, 2)
+
+/* Registers on MDIO_MMD_VEND1 */
+#define MTK_PHY_LPI_PCS_DSP_CTRL 0x121
+#define MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK GENMASK(12, 8)
+
+#define MTK_PHY_HOST_CMD1 0x800e
+#define MTK_PHY_HOST_CMD2 0x800f
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */
+#define AUTO_NP_10XEN BIT(6)
+
+enum {
+ PHY_AUX_SPD_10 = 0,
+ PHY_AUX_SPD_100,
+ PHY_AUX_SPD_1000,
+ PHY_AUX_SPD_2500,
+};
+
+static int mt798x_2p5ge_phy_load_fw(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ void __iomem *mcu_csr_base, *pmb_addr;
+ const struct firmware *fw;
+ int ret, i;
+ u32 reg;
+
+ pmb_addr = ioremap(MT7988_2P5GE_PMB_FW_BASE, MT7988_2P5GE_PMB_FW_LEN);
+ if (!pmb_addr)
+ return -ENOMEM;
+ mcu_csr_base = ioremap(MTK_2P5GPHY_MCU_CSR_BASE,
+ MTK_2P5GPHY_MCU_CSR_LEN);
+ if (!mcu_csr_base) {
+ ret = -ENOMEM;
+ goto free_pmb;
+ }
+
+ ret = request_firmware_direct(&fw, MT7988_2P5GE_PMB_FW, dev);
+ if (ret) {
+ dev_err(dev, "failed to load firmware: %s, ret: %d\n",
+ MT7988_2P5GE_PMB_FW, ret);
+ goto free;
+ }
+
+ if (fw->size != MT7988_2P5GE_PMB_FW_SIZE) {
+ dev_err(dev, "Firmware size 0x%zx != 0x%x\n",
+ fw->size, MT7988_2P5GE_PMB_FW_SIZE);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ reg = readw(mcu_csr_base + MD32_EN_CFG);
+ if (reg & MD32_EN) {
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+ usleep_range(10000, 11000);
+ }
+ phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
+
+ /* Write magic number to safely stall MCU */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_HOST_CMD1, 0x1100);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_HOST_CMD2, 0x00df);
+
+ for (i = 0; i < MT7988_2P5GE_PMB_FW_SIZE - 1; i += 4)
+ writel(*((uint32_t *)(fw->data + i)), pmb_addr + i);
+
+ writew(reg & ~MD32_EN, mcu_csr_base + MD32_EN_CFG);
+ writew(reg | MD32_EN, mcu_csr_base + MD32_EN_CFG);
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+ /* A delay is needed here for the MCU initialization to stabilize */
+ usleep_range(7000, 8000);
+
+ dev_info(dev, "Firmware date code: %x/%x/%x, version: %x.%x\n",
+ be16_to_cpu(*((__be16 *)(fw->data +
+ MT7988_2P5GE_PMB_FW_SIZE - 8))),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 6),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 5),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 2),
+ *(fw->data + MT7988_2P5GE_PMB_FW_SIZE - 1));
+
+release_fw:
+ release_firmware(fw);
+free:
+ iounmap(mcu_csr_base);
+free_pmb:
+ iounmap(pmb_addr);
+
+ return ret;
+}
+
+static int mt798x_2p5ge_phy_config_init(struct phy_device *phydev)
+{
+ /* Check if PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
+ return -ENODEV;
+
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LPI_PCS_DSP_CTRL,
+ MTK_PHY_LPI_SIG_EN_LO_THRESH100_MASK, 0);
+
+ /* Enable the 16-bit next page exchange bit if 1000Base-T isn't advertised */
+ mtk_tr_modify(phydev, 0x0, 0xf, 0x3c, AUTO_NP_10XEN,
+ FIELD_PREP(AUTO_NP_10XEN, 0x1));
+
+ /* Enable HW auto downshift */
+ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED_1,
+ MTK_PHY_AUX_CTRL_AND_STATUS,
+ 0, MTK_PHY_ENABLE_DOWNSHIFT);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u32 adv;
+ int ret;
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ /* Clause 45 doesn't define 1000BaseT support. Use Clause 22 instead in
+ * our design.
+ */
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ ret = phy_modify_changed(phydev, MII_CTRL1000, ADVERTISE_1000FULL, adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int mt798x_2p5ge_phy_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* This PHY can't handle collisions, and neither can the (XFI) MAC
+ * it's connected to. Although it can do the HDX handshake, it
+ * doesn't support the CSMA/CD that HDX requires.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ /* When MDIO_STAT1_LSTATUS is raised by genphy_c45_read_link(), this
+ * PHY actually hasn't finished AN yet. So use CL22's link update
+ * function instead.
+ */
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ /* We'll read the link speed through vendor-specific registers below,
+ * so skip phy_resolve_aneg_linkmode (AN on) and genphy_c45_read_pma
+ * (AN off).
+ */
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Clause 45 doesn't define 1000BaseT support. Read the link
+ * partner's 1G advertisement via Clause 22.
+ */
+ ret = phy_read(phydev, MII_STAT1000);
+ if (ret < 0)
+ return ret;
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, ret);
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ if (phydev->link) {
+ ret = phy_read(phydev, PHY_AUX_CTRL_STATUS);
+ if (ret < 0)
+ return ret;
+
+ switch (FIELD_GET(PHY_AUX_SPEED_MASK, ret)) {
+ case PHY_AUX_SPD_10:
+ phydev->speed = SPEED_10;
+ break;
+ case PHY_AUX_SPD_100:
+ phydev->speed = SPEED_100;
+ break;
+ case PHY_AUX_SPD_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case PHY_AUX_SPD_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+
+ phydev->duplex = DUPLEX_FULL;
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ }
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ return RATE_MATCH_PAUSE;
+}
+
+static int mt798x_2p5ge_phy_probe(struct phy_device *phydev)
+{
+ struct pinctrl *pinctrl;
+ int ret;
+
+ switch (phydev->drv->phy_id) {
+ case MTK_2P5GPHY_ID_MT7988:
+ /* This built-in 2.5GbE hardware only sets MDIO_DEVS_PMAPMD.
+ * Set the rest in this driver, since the PCS/AN/VEND1/VEND2
+ * MDIO manageable devices actually exist.
+ */
+ phydev->c45_ids.mmds_present |= MDIO_DEVS_PCS |
+ MDIO_DEVS_AN |
+ MDIO_DEVS_VEND1 |
+ MDIO_DEVS_VEND2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mt798x_2p5ge_phy_load_fw(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Setup LED */
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_POLARITY | MTK_PHY_LED_ON_LINK10 |
+ MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK1000 |
+ MTK_PHY_LED_ON_LINK2500);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_ON_CTRL,
+ MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX);
+
+ /* Switch pinctrl after setting polarity to avoid bogus blinking */
+ pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "i2p5gbe-led");
+ if (IS_ERR(pinctrl))
+ dev_err(&phydev->mdio.dev, "Failed to set LED pins!\n");
+
+ return 0;
+}
+
+static struct phy_driver mtk_2p5gephy_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(MTK_2P5GPHY_ID_MT7988),
+ .name = "MediaTek MT7988 2.5GbE PHY",
+ .probe = mt798x_2p5ge_phy_probe,
+ .config_init = mt798x_2p5ge_phy_config_init,
+ .config_aneg = mt798x_2p5ge_phy_config_aneg,
+ .get_features = mt798x_2p5ge_phy_get_features,
+ .read_status = mt798x_2p5ge_phy_read_status,
+ .get_rate_matching = mt798x_2p5ge_phy_get_rate_matching,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = mtk_phy_read_page,
+ .write_page = mtk_phy_write_page,
+ },
+};
+
+module_phy_driver(mtk_2p5gephy_driver);
+
+static struct mdio_device_id __maybe_unused mtk_2p5ge_phy_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(0x00339c00) },
+ { }
+};
+
+MODULE_DESCRIPTION("MediaTek 2.5Gb Ethernet PHY driver");
+MODULE_AUTHOR("SkyLake Huang <SkyLake.Huang@mediatek.com>");
+MODULE_LICENSE("GPL");
+
+MODULE_DEVICE_TABLE(mdio, mtk_2p5ge_phy_tbl);
+MODULE_FIRMWARE(MT7988_2P5GE_PMB_FW);
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index 175cf5239bba..cd09fbf92ef2 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -7,12 +7,17 @@
#include <linux/pinctrl/consumer.h>
#include <linux/phy.h>
#include <linux/regmap.h>
+#include <linux/of.h>
#include "../phylib.h"
#include "mtk.h"
+#define MTK_PHY_MAX_LEDS 2
+
#define MTK_GPHY_ID_MT7981 0x03a29461
#define MTK_GPHY_ID_MT7988 0x03a29481
+#define MTK_GPHY_ID_AN7581 0x03a294c1
+#define MTK_GPHY_ID_AN7583 0xc0ff0420
#define MTK_EXT_PAGE_ACCESS 0x1f
#define MTK_PHY_PAGE_STANDARD 0x0000
@@ -1319,6 +1324,7 @@ static int mt7988_phy_probe_shared(struct phy_device *phydev)
{
struct device_node *np = dev_of_node(&phydev->mdio.bus->dev);
struct mtk_socphy_shared *shared = phy_package_get_priv(phydev);
+ struct device_node *pio_np;
struct regmap *regmap;
u32 reg;
int ret;
@@ -1336,7 +1342,13 @@ static int mt7988_phy_probe_shared(struct phy_device *phydev)
* The 4 bits in TPBANK0 are kept as package shared data and are used to
* set LED polarity for each of the LED0.
*/
- regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,pio");
+ pio_np = of_parse_phandle(np, "mediatek,pio", 0);
+ if (!pio_np)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(pio_np);
+ of_node_put(pio_np);
+
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -1406,6 +1418,58 @@ static int mt7981_phy_probe(struct phy_device *phydev)
return mt798x_phy_calibration(phydev);
}
+static int an7581_phy_probe(struct phy_device *phydev)
+{
+ struct mtk_socphy_priv *priv;
+ struct pinctrl *pinctrl;
+
+ /* Toggle pinctrl to enable PHY LED */
+ pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "gbe-led");
+ if (IS_ERR(pinctrl))
+ dev_err(&phydev->mdio.bus->dev,
+ "Failed to setup PHY LED pinctrl\n");
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int an7581_phy_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ u16 val = 0;
+ u32 mode;
+
+ if (index >= MTK_PHY_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ val = MTK_PHY_LED_ON_POLARITY;
+ break;
+ case PHY_LED_ACTIVE_HIGH:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_POLARITY, val);
+}
+
+static int an7583_phy_config_init(struct phy_device *phydev)
+{
+ /* BMCR_PDOWN is enabled by default */
+ return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
+}
+
static struct phy_driver mtk_socphy_driver[] = {
{
PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981),
@@ -1441,6 +1505,29 @@ static struct phy_driver mtk_socphy_driver[] = {
.led_hw_control_set = mt798x_phy_led_hw_control_set,
.led_hw_control_get = mt798x_phy_led_hw_control_get,
},
+ {
+ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581),
+ .name = "Airoha AN7581 PHY",
+ .probe = an7581_phy_probe,
+ .led_blink_set = mt798x_phy_led_blink_set,
+ .led_brightness_set = mt798x_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
+ .led_hw_control_set = mt798x_phy_led_hw_control_set,
+ .led_hw_control_get = mt798x_phy_led_hw_control_get,
+ .led_polarity_set = an7581_phy_led_polarity_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7583),
+ .name = "Airoha AN7583 PHY",
+ .config_init = an7583_phy_config_init,
+ .probe = an7581_phy_probe,
+ .led_blink_set = mt798x_phy_led_blink_set,
+ .led_brightness_set = mt798x_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
+ .led_hw_control_set = mt798x_phy_led_hw_control_set,
+ .led_hw_control_get = mt798x_phy_led_hw_control_get,
+ .led_polarity_set = an7581_phy_led_polarity_set,
+ },
};
module_phy_driver(mtk_socphy_driver);
@@ -1448,6 +1535,8 @@ module_phy_driver(mtk_socphy_driver);
static const struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988) },
+ { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7581) },
+ { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_AN7583) },
{ }
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 24882d30f685..64aa03aed770 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -768,7 +768,8 @@ static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
return !ret;
}
-static int ksz8051_match_phy_device(struct phy_device *phydev)
+static int ksz8051_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ksz8051_ksz8795_match_phy_device(phydev, true);
}
@@ -888,7 +889,8 @@ static int ksz8061_config_init(struct phy_device *phydev)
return kszphy_config_init(phydev);
}
-static int ksz8795_match_phy_device(struct phy_device *phydev)
+static int ksz8795_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return ksz8051_ksz8795_match_phy_device(phydev, false);
}
@@ -2027,12 +2029,6 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
- /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
- * in this switch shall be regarded as broken.
- */
- if (phydev->dev_flags & MICREL_NO_EEE)
- phy_disable_eee(phydev);
-
return kszphy_config_init(phydev);
}
@@ -3236,10 +3232,6 @@ static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
int pulse_width;
int pin, event;
- /* Reject requests with unsupported flags */
- if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
mutex_lock(&shared->shared_lock);
event = rq->perout.index;
pin = ptp_find_pin(shared->ptp_clock, PTP_PF_PEROUT, event);
@@ -3406,11 +3398,6 @@ static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
struct phy_device *phydev = shared->phydev;
int pin;
- if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
- PTP_EXTTS_EDGES |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(shared->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
if (pin == -1 || pin != LAN8814_PTP_EXTTS_NUM)
@@ -3917,6 +3904,10 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM;
shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM;
shared->ptp_clock_info.pps = 0;
+ shared->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ shared->ptp_clock_info.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
shared->ptp_clock_info.pin_config = shared->pin_config;
shared->ptp_clock_info.n_per_out = LAN8814_PTP_PEROUT_NUM;
shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
@@ -5068,9 +5059,6 @@ static int lan8841_ptp_perout(struct ptp_clock_info *ptp,
int pin;
int ret;
- if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(ptp_priv->ptp_clock, PTP_PF_PEROUT, rq->perout.index);
if (pin == -1 || pin >= LAN8841_PTP_GPIO_NUM)
return -EINVAL;
@@ -5314,6 +5302,7 @@ static struct ptp_clock_info lan8841_ptp_clock_info = {
.n_per_out = LAN8841_PTP_GPIO_NUM,
.n_ext_ts = LAN8841_PTP_GPIO_NUM,
.n_pins = LAN8841_PTP_GPIO_NUM,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE,
};
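
The flag checks deleted from the drivers above are now performed centrally: the PTP core compares a request's flags against these capability masks before invoking the driver's ->enable() callback. Conceptually (a simplified sketch, not the exact core code):

	/* What supported_perout_flags expresses to the PTP core */
	static bool perout_request_ok(const struct ptp_clock_info *info,
				      const struct ptp_perout_request *req)
	{
		return !(req->flags & ~info->supported_perout_flags);
	}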
#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER 3
@@ -5705,7 +5694,6 @@ static struct phy_driver ksphy_driver[] = {
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = ksz9477_resume,
- .get_features = ksz9477_get_features,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 93de88c1c8fd..13570f628aa5 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -474,6 +474,8 @@ static struct phy_driver microchip_phy_driver[] = {
/* This mask (0xfffffff2) is to differentiate from
* LAN8742 (phy_id 0x0007c130 and 0x0007c131)
* and allows future phy_id revisions.
+ * These PHYs are integrated in LAN7800 and LAN7850 USB/Ethernet
+ * controllers.
*/
.phy_id_mask = 0xfffffff2,
.name = "Microchip LAN88xx",
diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
index 3e6bf10cdeed..e6514ce04c29 100644
--- a/drivers/net/phy/microchip_rds_ptp.c
+++ b/drivers/net/phy/microchip_rds_ptp.c
@@ -224,10 +224,6 @@ static int mchp_rds_ptp_perout(struct ptp_clock_info *ptpci,
struct phy_device *phydev = clock->phydev;
int ret, event_pin, pulsewidth;
- /* Reject requests with unsupported flags */
- if (perout->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
event_pin = ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
perout->index);
if (event_pin != clock->event_pin)
@@ -1259,6 +1255,7 @@ struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
clock->caps.pps = 0;
clock->caps.n_pins = MCHP_RDS_PTP_N_PIN;
clock->caps.n_per_out = MCHP_RDS_PTP_N_PEROUT;
+ clock->caps.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE;
clock->caps.pin_config = clock->pin_config;
clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index ed8fb14a7f21..6b800081eed5 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -946,7 +946,9 @@ static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
/* UDP checksum offset in IPv4 packet
* according to: https://tools.ietf.org/html/rfc768
*/
- val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
+ val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26);
+ if (enable)
+ val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
val);
@@ -1166,18 +1168,24 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
container_of(mii_ts, struct vsc8531_private, mii_ts);
if (!vsc8531->ptp->configured)
- return;
+ goto out;
- if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
- kfree_skb(skb);
- return;
- }
+ if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
+ goto out;
+
+ if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ if (ptp_msg_is_sync(skb, type))
+ goto out;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
mutex_lock(&vsc8531->ts_lock);
__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
mutex_unlock(&vsc8531->ts_lock);
+ return;
+
+out:
+ kfree_skb(skb);
}
static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
diff --git a/drivers/net/phy/mxl-86110.c b/drivers/net/phy/mxl-86110.c
new file mode 100644
index 000000000000..ff2a3a22bd5b
--- /dev/null
+++ b/drivers/net/phy/mxl-86110.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PHY driver for Maxlinear MXL86110
+ *
+ * Copyright 2023 MaxLinear Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+/* PHY ID */
+#define PHY_ID_MXL86110 0xc1335580
+
+/* required to access extended registers */
+#define MXL86110_EXTD_REG_ADDR_OFFSET 0x1E
+#define MXL86110_EXTD_REG_ADDR_DATA 0x1F
+#define PHY_IRQ_ENABLE_REG 0x12
+#define PHY_IRQ_ENABLE_REG_WOL BIT(6)
+
+/* SyncE Configuration Register - COM_EXT SYNCE_CFG */
+#define MXL86110_EXT_SYNCE_CFG_REG 0xA012
+#define MXL86110_EXT_SYNCE_CFG_CLK_FRE_SEL BIT(4)
+#define MXL86110_EXT_SYNCE_CFG_EN_SYNC_E_DURING_LNKDN BIT(5)
+#define MXL86110_EXT_SYNCE_CFG_EN_SYNC_E BIT(6)
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK GENMASK(3, 1)
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_125M_PLL 0
+#define MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_25M 4
+
+/* MAC Address registers */
+#define MXL86110_EXT_MAC_ADDR_CFG1 0xA007
+#define MXL86110_EXT_MAC_ADDR_CFG2 0xA008
+#define MXL86110_EXT_MAC_ADDR_CFG3 0xA009
+
+#define MXL86110_EXT_WOL_CFG_REG 0xA00A
+#define MXL86110_WOL_CFG_WOL_MASK BIT(3)
+
+/* RGMII register */
+#define MXL86110_EXT_RGMII_CFG1_REG 0xA003
+/* delay can be adjusted in steps of about 150ps */
+#define MXL86110_EXT_RGMII_CFG1_RX_NO_DELAY (0x0 << 10)
+/* Closest value to 2000 ps */
+#define MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS (0xD << 10)
+#define MXL86110_EXT_RGMII_CFG1_RX_DELAY_MASK GENMASK(13, 10)
+
+#define MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS (0xD << 0)
+#define MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_MASK GENMASK(3, 0)
+
+#define MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS (0xD << 4)
+#define MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_MASK GENMASK(7, 4)
+
+#define MXL86110_EXT_RGMII_CFG1_FULL_MASK \
+ ((MXL86110_EXT_RGMII_CFG1_RX_DELAY_MASK) | \
+ (MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_MASK) | \
+ (MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_MASK))
+
+/* EXT Sleep Control register */
+#define MXL86110_UTP_EXT_SLEEP_CTRL_REG 0x27
+#define MXL86110_UTP_EXT_SLEEP_CTRL_EN_SLEEP_SW_OFF 0
+#define MXL86110_UTP_EXT_SLEEP_CTRL_EN_SLEEP_SW_MASK BIT(15)
+
+/* RGMII In-Band Status and MDIO Configuration Register */
+#define MXL86110_EXT_RGMII_MDIO_CFG 0xA005
+#define MXL86110_RGMII_MDIO_CFG_EPA0_MASK GENMASK(6, 6)
+#define MXL86110_EXT_RGMII_MDIO_CFG_EBA_MASK GENMASK(5, 5)
+#define MXL86110_EXT_RGMII_MDIO_CFG_BA_MASK GENMASK(4, 0)
+
+#define MXL86110_MAX_LEDS 3
+/* LED registers and defines */
+#define MXL86110_LED0_CFG_REG 0xA00C
+#define MXL86110_LED1_CFG_REG 0xA00D
+#define MXL86110_LED2_CFG_REG 0xA00E
+
+#define MXL86110_LEDX_CFG_BLINK BIT(13)
+#define MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON BIT(12)
+#define MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON BIT(11)
+#define MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON BIT(10)
+#define MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON BIT(9)
+#define MXL86110_LEDX_CFG_LINK_UP_TX_ON BIT(8)
+#define MXL86110_LEDX_CFG_LINK_UP_RX_ON BIT(7)
+#define MXL86110_LEDX_CFG_LINK_UP_1GB_ON BIT(6)
+#define MXL86110_LEDX_CFG_LINK_UP_100MB_ON BIT(5)
+#define MXL86110_LEDX_CFG_LINK_UP_10MB_ON BIT(4)
+#define MXL86110_LEDX_CFG_LINK_UP_COLLISION BIT(3)
+#define MXL86110_LEDX_CFG_LINK_UP_1GB_BLINK BIT(2)
+#define MXL86110_LEDX_CFG_LINK_UP_100MB_BLINK BIT(1)
+#define MXL86110_LEDX_CFG_LINK_UP_10MB_BLINK BIT(0)
+
+#define MXL86110_LED_BLINK_CFG_REG 0xA00F
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_2HZ 0
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_4HZ BIT(0)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_8HZ BIT(1)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE1_16HZ (BIT(1) | BIT(0))
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_2HZ 0
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_4HZ BIT(2)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_8HZ BIT(3)
+#define MXL86110_LED_BLINK_CFG_FREQ_MODE2_16HZ (BIT(3) | BIT(2))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_50_ON 0
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_67_ON (BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_75_ON (BIT(5))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_83_ON (BIT(5) | BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_50_OFF (BIT(6))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_33_ON (BIT(6) | BIT(4))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_25_ON (BIT(6) | BIT(5))
+#define MXL86110_LED_BLINK_CFG_DUTY_CYCLE_17_ON (BIT(6) | BIT(5) | BIT(4))
+
+/* Chip Configuration Register - COM_EXT_CHIP_CFG */
+#define MXL86110_EXT_CHIP_CFG_REG 0xA001
+#define MXL86110_EXT_CHIP_CFG_RXDLY_ENABLE BIT(8)
+#define MXL86110_EXT_CHIP_CFG_SW_RST_N_MODE BIT(15)
+
+/**
+ * __mxl86110_write_extended_reg() - write to a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Unlocked version of mxl86110_write_extended_reg
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative error code
+ */
+static int __mxl86110_write_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 val)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_write(phydev, MXL86110_EXTD_REG_ADDR_DATA, val);
+}
+
+/**
+ * __mxl86110_read_extended_reg - Read a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: extended register number to read (address written to reg 30)
+ *
+ * Unlocked version of mxl86110_read_extended_reg
+ *
+ * Reads the content of a PHY extended register using the MaxLinear
+ * 2-step access mechanism: write the register address to reg 30 (0x1E),
+ * then read the value from reg 31 (0x1F).
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 16-bit register value on success, or negative errno code on failure.
+ */
+static int __mxl86110_read_extended_reg(struct phy_device *phydev, u16 regnum)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+ return __phy_read(phydev, MXL86110_EXTD_REG_ADDR_DATA);
+}
+
+/**
+ * __mxl86110_modify_extended_reg() - modify bits of a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Note: register value = (old register value & ~mask) | set.
+ * This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative error code
+ */
+static int __mxl86110_modify_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ ret = __phy_write(phydev, MXL86110_EXTD_REG_ADDR_OFFSET, regnum);
+ if (ret < 0)
+ return ret;
+
+ return __phy_modify(phydev, MXL86110_EXTD_REG_ADDR_DATA, mask, set);
+}
+
+/**
+ * mxl86110_write_extended_reg() - Write to a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * This function writes to an extended register of the PHY using the
+ * MaxLinear two-step access method (reg 0x1E/0x1F). It handles acquiring
+ * and releasing the MDIO bus lock internally.
+ *
+ * Return: 0 or negative error code
+ */
+static int mxl86110_write_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 val)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_write_extended_reg(phydev, regnum, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
+ * mxl86110_read_extended_reg() - Read a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: extended register number to read
+ *
+ * This function reads from an extended register of the PHY using the
+ * MaxLinear two-step access method (reg 0x1E/0x1F). It handles acquiring
+ * and releasing the MDIO bus lock internally.
+ *
+ * Return: 16-bit register value on success, or negative errno code on failure
+ */
+static int mxl86110_read_extended_reg(struct phy_device *phydev, u16 regnum)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_read_extended_reg(phydev, regnum);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
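
For example, a read-modify-write of an extended register through the locked helpers might look like this hedged sketch (the function itself is illustrative, not part of the driver):

	static int mxl86110_sketch_clear_wol(struct phy_device *phydev)
	{
		int val;

		val = mxl86110_read_extended_reg(phydev, MXL86110_EXT_WOL_CFG_REG);
		if (val < 0)
			return val;

		return mxl86110_write_extended_reg(phydev, MXL86110_EXT_WOL_CFG_REG,
						   val & ~MXL86110_WOL_CFG_WOL_MASK);
	}

Note that the bus lock is dropped between the two calls, so the sequence is not atomic; that is why the driver performs real read-modify-write cycles with __mxl86110_modify_extended_reg() under a single phy_lock_mdio_bus() section.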
+
+/**
+ * mxl86110_get_wol() - report if wake-on-lan is enabled
+ * @phydev: pointer to the phy_device
+ * @wol: a pointer to a &struct ethtool_wolinfo
+ */
+static void mxl86110_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int val;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+ val = mxl86110_read_extended_reg(phydev, MXL86110_EXT_WOL_CFG_REG);
+ if (val >= 0 && (val & MXL86110_WOL_CFG_WOL_MASK))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+/**
+ * mxl86110_set_wol() - enable/disable wake-on-lan
+ * @phydev: pointer to the phy_device
+ * @wol: a pointer to a &struct ethtool_wolinfo
+ *
+ * Configures the WOL magic packet MAC address and enables or disables WOL
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *netdev;
+ const unsigned char *mac;
+ int ret = 0;
+
+ phy_lock_mdio_bus(phydev);
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ netdev = phydev->attached_dev;
+ if (!netdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Configure the MAC address of the WOL magic packet */
+ mac = netdev->dev_addr;
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG1,
+ ((mac[0] << 8) | mac[1]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG2,
+ ((mac[2] << 8) | mac[3]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_write_extended_reg(phydev,
+ MXL86110_EXT_MAC_ADDR_CFG3,
+ ((mac[4] << 8) | mac[5]));
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_WOL_CFG_REG,
+ MXL86110_WOL_CFG_WOL_MASK,
+ MXL86110_WOL_CFG_WOL_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Enables Wake-on-LAN interrupt in the PHY. */
+ ret = __phy_modify(phydev, PHY_IRQ_ENABLE_REG, 0,
+ PHY_IRQ_ENABLE_REG_WOL);
+ if (ret < 0)
+ goto out;
+
+ phydev_dbg(phydev,
+ "%s, MAC Addr: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ __func__,
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ } else {
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_WOL_CFG_REG,
+ MXL86110_WOL_CFG_WOL_MASK,
+ 0);
+ if (ret < 0)
+ goto out;
+
+ /* Disables Wake-on-LAN interrupt in the PHY. */
+ ret = __phy_modify(phydev, PHY_IRQ_ENABLE_REG,
+ PHY_IRQ_ENABLE_REG_WOL, 0);
+ }
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+static const unsigned long supported_trgs = (BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_HALF_DUPLEX) |
+ BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX));
+
+static int mxl86110_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_trgs)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int mxl86110_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ val = mxl86110_read_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + index);
+ if (val < 0)
+ return val;
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON)
+ *rules |= BIT(TRIGGER_NETDEV_TX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON)
+ *rules |= BIT(TRIGGER_NETDEV_RX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON)
+ *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON)
+ *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_10MB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_10);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_100MB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_100);
+
+ if (val & MXL86110_LEDX_CFG_LINK_UP_1GB_ON)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ return 0;
+}
+
+static int mxl86110_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_10))
+ val |= MXL86110_LEDX_CFG_LINK_UP_10MB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_100))
+ val |= MXL86110_LEDX_CFG_LINK_UP_100MB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_LINK_1000))
+ val |= MXL86110_LEDX_CFG_LINK_UP_1GB_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_TX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_TX_ACT_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_RX_ACT_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_HALF_DUPLEX_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ val |= MXL86110_LEDX_CFG_LINK_UP_FULL_DUPLEX_ON;
+
+ if (rules & BIT(TRIGGER_NETDEV_TX) ||
+ rules & BIT(TRIGGER_NETDEV_RX))
+ val |= MXL86110_LEDX_CFG_BLINK;
+
+ return mxl86110_write_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + index, val);
+}
+
+/**
+ * mxl86110_synce_clk_cfg() - applies syncE/clk output configuration
+ * @phydev: pointer to the phy_device
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_synce_clk_cfg(struct phy_device *phydev)
+{
+ u16 mask = 0, val = 0;
+
+ /*
+ * Configures the clock output to its default
+ * setting as per the datasheet.
+ * This results in a 25MHz clock output being selected in the
+ * COM_EXT_SYNCE_CFG register for SyncE configuration.
+ */
+ val = MXL86110_EXT_SYNCE_CFG_EN_SYNC_E |
+ FIELD_PREP(MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK,
+ MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_25M);
+ mask = MXL86110_EXT_SYNCE_CFG_EN_SYNC_E |
+ MXL86110_EXT_SYNCE_CFG_CLK_SRC_SEL_MASK |
+ MXL86110_EXT_SYNCE_CFG_CLK_FRE_SEL;
+
+ /* Write clock output configuration */
+ return __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_SYNCE_CFG_REG,
+ mask, val);
+}
+
+/**
+ * mxl86110_broadcast_cfg - Configure MDIO broadcast setting for PHY
+ * @phydev: Pointer to the PHY device structure
+ *
+ * This function configures the MDIO broadcast behavior of the MxL86110 PHY.
+ * Currently, broadcast mode is explicitly disabled by clearing the EPA0 bit
+ * in the RGMII_MDIO_CFG extended register.
+ *
+ * Note: This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 on success or a negative errno code on failure.
+ */
+static int mxl86110_broadcast_cfg(struct phy_device *phydev)
+{
+ return __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_RGMII_MDIO_CFG,
+ MXL86110_RGMII_MDIO_CFG_EPA0_MASK,
+ 0);
+}
+
+/**
+ * mxl86110_enable_led_activity_blink - Enable LEDs activity blink on PHY
+ * @phydev: Pointer to the PHY device structure
+ *
+ * Configure all PHY LEDs to blink on traffic activity regardless of whether
+ * they are ON or OFF. This behavior allows each LED to serve as a pure activity
+ * indicator, independently of its use as a link status indicator.
+ *
+ * By default, each LED blinks only when it is also in the ON state.
+ * This function modifies the appropriate registers (LABx fields)
+ * to enable blinking even when the LEDs are OFF, to allow the LED to be used
+ * as a traffic indicator without requiring it to also serve
+ * as a link status LED.
+ *
+ * Note: Any further LED customization can be performed via the
+ * /sys/class/leds interface; the functions led_hw_is_supported,
+ * led_hw_control_get, and led_hw_control_set are used
+ * to support this mechanism.
+ *
+ * This function assumes the caller already holds the MDIO bus lock
+ * or otherwise has exclusive access to the PHY.
+ *
+ * Return: 0 on success or a negative errno code on failure.
+ */
+static int mxl86110_enable_led_activity_blink(struct phy_device *phydev)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < MXL86110_MAX_LEDS; i++) {
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_LED0_CFG_REG + i,
+ 0,
+ MXL86110_LEDX_CFG_BLINK);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * mxl86110_config_init() - initialize the PHY
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_config_init(struct phy_device *phydev)
+{
+ u16 val = 0;
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* configure syncE / clk output */
+ ret = mxl86110_synce_clk_cfg(phydev);
+ if (ret < 0)
+ goto out;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val = MXL86110_EXT_RGMII_CFG1_TX_1G_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_TX_10MB_100MB_DELAY_1950PS |
+ MXL86110_EXT_RGMII_CFG1_RX_DELAY_1950PS;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_RGMII_CFG1_REG,
+ MXL86110_EXT_RGMII_CFG1_FULL_MASK,
+ val);
+ if (ret < 0)
+ goto out;
+
+ /* Configure RXDLY (RGMII Rx Clock Delay) to disable
+ * the default additional delay value on RX_CLK
+ * (2 ns for 125 MHz, 8 ns for 25 MHz/2.5 MHz)
+ * and use only the digital delay selected above.
+ */
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86110_EXT_CHIP_CFG_REG,
+ MXL86110_EXT_CHIP_CFG_RXDLY_ENABLE,
+ 0);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_enable_led_activity_blink(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_broadcast_cfg(phydev);
+
+out:
+ phy_unlock_mdio_bus(phydev);
+ return ret;
+}
+
+static struct phy_driver mxl_phy_drvs[] = {
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_MXL86110),
+ .name = "MXL86110 Gigabit Ethernet",
+ .config_init = mxl86110_config_init,
+ .get_wol = mxl86110_get_wol,
+ .set_wol = mxl86110_set_wol,
+ .led_hw_is_supported = mxl86110_led_hw_is_supported,
+ .led_hw_control_get = mxl86110_led_hw_control_get,
+ .led_hw_control_set = mxl86110_led_hw_control_set,
+ },
+};
+
+module_phy_driver(mxl_phy_drvs);
+
+static const struct mdio_device_id __maybe_unused mxl_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_MXL86110) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, mxl_tbl);
+
+MODULE_DESCRIPTION("MaxLinear MXL86110 PHY driver");
+MODULE_AUTHOR("Stefano Radaelli");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 250a018d5546..4c6d905f0a9f 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -19,7 +19,6 @@
#include "nxp-c45-tja11xx.h"
-#define PHY_ID_MASK GENMASK(31, 4)
/* Same id: TJA1103, TJA1104 */
#define PHY_ID_TJA_1103 0x001BB010
/* Same id: TJA1120, TJA1121 */
@@ -763,9 +762,6 @@ static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
struct phy_device *phydev = priv->phydev;
int pin;
- if (perout->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
if (pin < 0)
return pin;
@@ -861,12 +857,6 @@ static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
int pin;
- if (extts->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
/* Sampling on both edges is not supported */
if ((extts->flags & PTP_RISING_EDGE) &&
(extts->flags & PTP_FALLING_EDGE) &&
@@ -962,6 +952,10 @@ static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
.n_pins = ARRAY_SIZE(nxp_c45_ptp_pins),
.n_ext_ts = 1,
.n_per_out = 1,
+ .supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
+ .supported_perout_flags = PTP_PEROUT_PHASE,
};
priv->ptp_clock = ptp_clock_register(&priv->caps,
@@ -1971,28 +1965,24 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev)
return macsec_ability;
}
-static int tja1103_match_phy_device(struct phy_device *phydev)
+static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
- !nxp_c45_macsec_ability(phydev);
-}
+ if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
+ phydrv->phy_id_mask))
+ return 0;
-static int tja1104_match_phy_device(struct phy_device *phydev)
-{
- return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
- nxp_c45_macsec_ability(phydev);
+ return !nxp_c45_macsec_ability(phydev);
}
-static int tja1120_match_phy_device(struct phy_device *phydev)
+static int tja11xx_macsec_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
- !nxp_c45_macsec_ability(phydev);
-}
+ if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
+ phydrv->phy_id_mask))
+ return 0;
-static int tja1121_match_phy_device(struct phy_device *phydev)
-{
- return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
- nxp_c45_macsec_ability(phydev);
+ return nxp_c45_macsec_ability(phydev);
}
static const struct nxp_c45_regmap tja1120_regmap = {
@@ -2065,6 +2055,7 @@ static const struct nxp_c45_phy_data tja1120_phy_data = {
static struct phy_driver nxp_c45_driver[] = {
{
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
.name = "NXP C45 TJA1103",
.get_features = nxp_c45_get_features,
.driver_data = &tja1103_phy_data,
@@ -2086,9 +2077,10 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
- .match_phy_device = tja1103_match_phy_device,
+ .match_phy_device = tja11xx_no_macsec_match_phy_device,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
.name = "NXP C45 TJA1104",
.get_features = nxp_c45_get_features,
.driver_data = &tja1103_phy_data,
@@ -2110,9 +2102,10 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
- .match_phy_device = tja1104_match_phy_device,
+ .match_phy_device = tja11xx_macsec_match_phy_device,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
.name = "NXP C45 TJA1120",
.get_features = nxp_c45_get_features,
.driver_data = &tja1120_phy_data,
@@ -2135,9 +2128,10 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
- .match_phy_device = tja1120_match_phy_device,
+ .match_phy_device = tja11xx_no_macsec_match_phy_device,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
.name = "NXP C45 TJA1121",
.get_features = nxp_c45_get_features,
.driver_data = &tja1120_phy_data,
@@ -2160,7 +2154,7 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
- .match_phy_device = tja1121_match_phy_device,
+ .match_phy_device = tja11xx_macsec_match_phy_device,
},
};
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 07e94a2478ac..3c38a8ddae2f 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -651,12 +651,14 @@ static int tja1102_match_phy_device(struct phy_device *phydev, bool port0)
return !ret;
}
-static int tja1102_p0_match_phy_device(struct phy_device *phydev)
+static int tja1102_p0_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return tja1102_match_phy_device(phydev, true);
}
-static int tja1102_p1_match_phy_device(struct phy_device *phydev)
+static int tja1102_p1_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return tja1102_match_phy_device(phydev, false);
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index cc1bfd22fb81..73f9cb2e2844 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -543,20 +543,26 @@ static int phy_scan_fixups(struct phy_device *phydev)
return 0;
}
-static int phy_bus_match(struct device *dev, const struct device_driver *drv)
+/**
+ * genphy_match_phy_device - match a PHY device with a PHY driver
+ * @phydev: target phy_device struct
+ * @phydrv: target phy_driver struct
+ *
+ * Description: Checks whether the given PHY device matches the specified
+ * PHY driver. For Clause 45 PHYs, iterates over the available device
+ * identifiers and compares them against the driver's expected PHY ID,
+ * applying the provided mask. For Clause 22 PHYs, a direct ID comparison
+ * is performed.
+ *
+ * Return: 1 if the PHY device matches the driver, 0 otherwise.
+ */
+int genphy_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
- struct phy_device *phydev = to_phy_device(dev);
- const struct phy_driver *phydrv = to_phy_driver(drv);
- const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
- int i;
-
- if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY))
- return 0;
-
- if (phydrv->match_phy_device)
- return phydrv->match_phy_device(phydev);
-
if (phydev->is_c45) {
+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+ int i;
+
for (i = 1; i < num_ids; i++) {
if (phydev->c45_ids.device_ids[i] == 0xffffffff)
continue;
@@ -565,11 +571,27 @@ static int phy_bus_match(struct device *dev, const struct device_driver *drv)
phydrv->phy_id, phydrv->phy_id_mask))
return 1;
}
+
return 0;
- } else {
- return phy_id_compare(phydev->phy_id, phydrv->phy_id,
- phydrv->phy_id_mask);
}
+
+ return phy_id_compare(phydev->phy_id, phydrv->phy_id,
+ phydrv->phy_id_mask);
+}
+EXPORT_SYMBOL_GPL(genphy_match_phy_device);
+
+static int phy_bus_match(struct device *dev, const struct device_driver *drv)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+ const struct phy_driver *phydrv = to_phy_driver(drv);
+
+ if (!(phydrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY))
+ return 0;
+
+ if (phydrv->match_phy_device)
+ return phydrv->match_phy_device(phydev, phydrv);
+
+ return genphy_match_phy_device(phydev, phydrv);
}
static ssize_t
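The exported helper makes the common "match on ID, then refine" pattern cheap for drivers: the callback now receives the phy_driver, so the ID/mask comparison no longer needs to be open-coded per entry. A minimal sketch of such a callback, assuming a hypothetical my_phy_has_feature() capability probe:

static int my_match_phy_device(struct phy_device *phydev,
			       const struct phy_driver *phydrv)
{
	/* Reuse the generic C22/C45 ID matching first */
	if (!genphy_match_phy_device(phydev, phydrv))
		return 0;

	/* my_phy_has_feature() is an assumed driver-local probe that
	 * distinguishes two variants sharing one PHY ID, mirroring the
	 * TJA11xx MACsec split above.
	 */
	return my_phy_has_feature(phydev);
}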
@@ -1727,8 +1749,10 @@ void phy_detach(struct phy_device *phydev)
struct module *ndev_owner = NULL;
struct mii_bus *bus;
- if (phydev->devlink)
+ if (phydev->devlink) {
device_link_del(phydev->devlink);
+ phydev->devlink = NULL;
+ }
if (phydev->sysfs_links) {
if (dev)
@@ -2975,6 +2999,21 @@ int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL_GPL(phy_get_tx_amplitude_gain);
+/**
+ * phy_get_mac_termination - store the MAC termination resistance in @val
+ * @phydev: phy_device struct
+ * @dev: pointer to the device's struct device
+ * @val: MAC termination resistance in ohms
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int phy_get_mac_termination(struct phy_device *phydev, struct device *dev,
+ u32 *val)
+{
+ return phy_get_u32_property(dev, "mac-termination-ohms", val);
+}
+EXPORT_SYMBOL_GPL(phy_get_mac_termination);
+
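A MAC driver would typically call this from probe and keep a board default when the "mac-termination-ohms" property is absent; a hedged sketch (struct my_mac_priv and my_mac_write_termination() are hypothetical):

static int my_mac_setup_termination(struct my_mac_priv *priv)
{
	u32 ohms;

	/* Fall back to an assumed 50 ohm default if the property fails */
	if (phy_get_mac_termination(priv->phydev, priv->dev, &ohms))
		ohms = 50;

	return my_mac_write_termination(priv, ohms);
}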
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3236,18 +3275,6 @@ struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
EXPORT_SYMBOL(fwnode_phy_find_device);
/**
- * device_phy_find_device - For the given device, get the phy_device
- * @dev: Pointer to the given device
- *
- * Refer return conditions of fwnode_phy_find_device().
- */
-struct phy_device *device_phy_find_device(struct device *dev)
-{
- return fwnode_phy_find_device(dev_fwnode(dev));
-}
-EXPORT_SYMBOL_GPL(device_phy_find_device);
-
-/**
* fwnode_get_phy_node - Get the phy_node using the named reference.
* @fwnode: Pointer to fwnode from which phy_node has to be obtained.
*
@@ -3262,12 +3289,12 @@ struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode)
/* Only phy-handle is used for ACPI */
phy_node = fwnode_find_reference(fwnode, "phy-handle", 0);
- if (is_acpi_node(fwnode) || !IS_ERR(phy_node))
+ if (!IS_ERR(phy_node) || is_acpi_node(fwnode))
return phy_node;
phy_node = fwnode_find_reference(fwnode, "phy", 0);
- if (IS_ERR(phy_node))
- phy_node = fwnode_find_reference(fwnode, "phy-device", 0);
- return phy_node;
+ if (!IS_ERR(phy_node))
+ return phy_node;
+ return fwnode_find_reference(fwnode, "phy-device", 0);
}
EXPORT_SYMBOL_GPL(fwnode_get_phy_node);
@@ -3554,19 +3581,15 @@ static int __init phy_init(void)
phylib_register_stubs();
rtnl_unlock();
- rc = mdio_bus_init();
- if (rc)
- goto err_ethtool_phy_ops;
-
rc = phy_caps_init();
if (rc)
- goto err_mdio_bus;
+ goto err_ethtool_phy_ops;
features_init();
rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
if (rc)
- goto err_mdio_bus;
+ goto err_ethtool_phy_ops;
rc = phy_driver_register(&genphy_driver, THIS_MODULE);
if (rc)
@@ -3576,8 +3599,6 @@ static int __init phy_init(void)
err_c45:
phy_driver_unregister(&genphy_c45_driver);
-err_mdio_bus:
- mdio_bus_exit();
err_ethtool_phy_ops:
rtnl_lock();
phylib_unregister_stubs();
@@ -3591,7 +3612,6 @@ static void __exit phy_exit(void)
{
phy_driver_unregister(&genphy_c45_driver);
phy_driver_unregister(&genphy_driver);
- mdio_bus_exit();
rtnl_lock();
phylib_unregister_stubs();
ethtool_set_ethtool_phy_ops(NULL);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 1bdd5d8bb5b0..0faa3d97e06b 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -24,13 +24,6 @@
#include "sfp.h"
#include "swphy.h"
-#define SUPPORTED_INTERFACES \
- (SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE | \
- SUPPORTED_BNC | SUPPORTED_AUI | SUPPORTED_Backplane)
-#define ADVERTISED_INTERFACES \
- (ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_FIBRE | \
- ADVERTISED_BNC | ADVERTISED_AUI | ADVERTISED_Backplane)
-
enum {
PHYLINK_DISABLE_STOPPED,
PHYLINK_DISABLE_LINK,
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index 893c82479671..c3dcb6257430 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -17,9 +18,15 @@
#include "realtek.h"
-#define RTL821x_PHYSR 0x11
-#define RTL821x_PHYSR_DUPLEX BIT(13)
-#define RTL821x_PHYSR_SPEED GENMASK(15, 14)
+#define RTL8201F_IER 0x13
+
+#define RTL8201F_ISR 0x1e
+#define RTL8201F_ISR_ANERR BIT(15)
+#define RTL8201F_ISR_DUPLEX BIT(13)
+#define RTL8201F_ISR_LINK BIT(11)
+#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
+ RTL8201F_ISR_DUPLEX | \
+ RTL8201F_ISR_LINK)
#define RTL821x_INER 0x12
#define RTL8211B_INER_INIT 0x6400
@@ -29,15 +36,48 @@
#define RTL821x_INSR 0x13
#define RTL821x_EXT_PAGE_SELECT 0x1e
+
#define RTL821x_PAGE_SELECT 0x1f
+#define RTL821x_SET_EXT_PAGE 0x07
+
+/* RTL8211E extension page 44/0x2c */
+#define RTL8211E_LEDCR_EXT_PAGE 0x2c
+#define RTL8211E_LEDCR1 0x1a
+#define RTL8211E_LEDCR1_ACT_TXRX BIT(4)
+#define RTL8211E_LEDCR1_MASK BIT(4)
+#define RTL8211E_LEDCR1_SHIFT 1
+
+#define RTL8211E_LEDCR2 0x1c
+#define RTL8211E_LEDCR2_LINK_1000 BIT(2)
+#define RTL8211E_LEDCR2_LINK_100 BIT(1)
+#define RTL8211E_LEDCR2_LINK_10 BIT(0)
+#define RTL8211E_LEDCR2_MASK GENMASK(2, 0)
+#define RTL8211E_LEDCR2_SHIFT 4
+
+/* RTL8211E extension page 164/0xa4 */
+#define RTL8211E_RGMII_EXT_PAGE 0xa4
+#define RTL8211E_RGMII_DELAY 0x1c
+#define RTL8211E_CTRL_DELAY BIT(13)
+#define RTL8211E_TX_DELAY BIT(12)
+#define RTL8211E_RX_DELAY BIT(11)
+#define RTL8211E_DELAY_MASK GENMASK(13, 11)
+/* RTL8211F PHY configuration */
+#define RTL8211F_PHYCR_PAGE 0xa43
#define RTL8211F_PHYCR1 0x18
+#define RTL8211F_ALDPS_PLL_OFF BIT(1)
+#define RTL8211F_ALDPS_ENABLE BIT(2)
+#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+
#define RTL8211F_PHYCR2 0x19
#define RTL8211F_CLKOUT_EN BIT(0)
#define RTL8211F_PHYCR2_PHY_EEE_ENABLE BIT(5)
+#define RTL8211F_INSR_PAGE 0xa43
#define RTL8211F_INSR 0x1d
+/* RTL8211F LED configuration */
+#define RTL8211F_LEDCR_PAGE 0xd04
#define RTL8211F_LEDCR 0x10
#define RTL8211F_LEDCR_MODE BIT(15)
#define RTL8211F_LEDCR_ACT_TXRX BIT(4)
@@ -47,25 +87,32 @@
#define RTL8211F_LEDCR_MASK GENMASK(4, 0)
#define RTL8211F_LEDCR_SHIFT 5
+/* RTL8211F RGMII configuration */
+#define RTL8211F_RGMII_PAGE 0xd08
+
+#define RTL8211F_TXCR 0x11
#define RTL8211F_TX_DELAY BIT(8)
+
+#define RTL8211F_RXCR 0x15
#define RTL8211F_RX_DELAY BIT(3)
-#define RTL8211F_ALDPS_PLL_OFF BIT(1)
-#define RTL8211F_ALDPS_ENABLE BIT(2)
-#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+/* RTL8211F WOL interrupt configuration */
+#define RTL8211F_INTBCR_PAGE 0xd40
+#define RTL8211F_INTBCR 0x16
+#define RTL8211F_INTBCR_INTB_PMEB BIT(5)
-#define RTL8211E_CTRL_DELAY BIT(13)
-#define RTL8211E_TX_DELAY BIT(12)
-#define RTL8211E_RX_DELAY BIT(11)
+/* RTL8211F WOL settings */
+#define RTL8211F_WOL_SETTINGS_PAGE 0xd8a
+#define RTL8211F_WOL_SETTINGS_EVENTS 16
+#define RTL8211F_WOL_EVENT_MAGIC BIT(12)
+#define RTL8211F_WOL_SETTINGS_STATUS 17
+#define RTL8211F_WOL_STATUS_RESET (BIT(15) | 0x1fff)
-#define RTL8201F_ISR 0x1e
-#define RTL8201F_ISR_ANERR BIT(15)
-#define RTL8201F_ISR_DUPLEX BIT(13)
-#define RTL8201F_ISR_LINK BIT(11)
-#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
- RTL8201F_ISR_DUPLEX | \
- RTL8201F_ISR_LINK)
-#define RTL8201F_IER 0x13
+/* RTL8211F Unique physical and multicast address (WOL) */
+#define RTL8211F_PHYSICAL_ADDR_PAGE 0xd8c
+#define RTL8211F_PHYSICAL_ADDR_WORD0 16
+#define RTL8211F_PHYSICAL_ADDR_WORD1 17
+#define RTL8211F_PHYSICAL_ADDR_WORD2 18
#define RTL822X_VND1_SERDES_OPTION 0x697a
#define RTL822X_VND1_SERDES_OPTION_MODE_MASK GENMASK(5, 0)
@@ -111,8 +158,10 @@
#define RTL_8221B_VB_CG 0x001cc849
#define RTL_8221B_VN_CG 0x001cc84a
#define RTL_8251B 0x001cc862
+#define RTL_8261C 0x001cc890
-#define RTL8211F_LED_COUNT 3
+/* RTL8211E and RTL8211F support up to three LEDs */
+#define RTL8211x_LED_COUNT 3
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -123,6 +172,7 @@ struct rtl821x_priv {
u16 phycr2;
bool has_phycr2;
struct clk *clk;
+ u32 saved_wolopts;
};
static int rtl821x_read_page(struct phy_device *phydev)
@@ -135,6 +185,36 @@ static int rtl821x_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
}
+static int rtl821x_read_ext_page(struct phy_device *phydev, u16 ext_page,
+ u32 regnum)
+{
+ int oldpage, ret = 0;
+
+ oldpage = phy_select_page(phydev, RTL821x_SET_EXT_PAGE);
+ if (oldpage >= 0) {
+ ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, ext_page);
+ if (ret == 0)
+ ret = __phy_read(phydev, regnum);
+ }
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
+static int rtl821x_modify_ext_page(struct phy_device *phydev, u16 ext_page,
+ u32 regnum, u16 mask, u16 set)
+{
+ int oldpage, ret = 0;
+
+ oldpage = phy_select_page(phydev, RTL821x_SET_EXT_PAGE);
+ if (oldpage >= 0) {
+ ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, ext_page);
+ if (ret == 0)
+ ret = __phy_modify(phydev, regnum, mask, set);
+ }
+
+ return phy_restore_page(phydev, oldpage, ret);
+}
+
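Both helpers keep the phy_select_page()/phy_restore_page() pairing in one place, so a caller cannot leave the PHY stuck on the extension-select page along an error path. A trivial sketch built on the new helper, reading one RTL8211E LED register:

/* Sketch: returns the LEDCR1 value from extension page 0x2c, or -errno */
static int rtl8211e_read_ledcr1(struct phy_device *phydev)
{
	return rtl821x_read_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
				     RTL8211E_LEDCR1);
}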
static int rtl821x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -151,7 +231,7 @@ static int rtl821x_probe(struct phy_device *phydev)
return dev_err_probe(dev, PTR_ERR(priv->clk),
"failed to get phy clock\n");
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR1);
+ ret = phy_read_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1);
if (ret < 0)
return ret;
@@ -161,7 +241,7 @@ static int rtl821x_probe(struct phy_device *phydev)
priv->has_phycr2 = !(phy_id == RTL_8211FVD_PHYID);
if (priv->has_phycr2) {
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
+ ret = phy_read_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2);
if (ret < 0)
return ret;
@@ -197,7 +277,7 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
{
int err;
- err = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ err = phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
return (err < 0) ? err : 0;
}
@@ -340,7 +420,7 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
- irq_status = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ irq_status = phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
@@ -354,6 +434,53 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
+static void rtl8211f_get_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_MAGIC;
+ if (phy_read_paged(dev, RTL8211F_WOL_SETTINGS_PAGE, RTL8211F_WOL_SETTINGS_EVENTS)
+ & RTL8211F_WOL_EVENT_MAGIC)
+ wol->wolopts = WAKE_MAGIC;
+}
+
+static int rtl8211f_set_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
+{
+ const u8 *mac_addr = dev->attached_dev->dev_addr;
+ int oldpage;
+
+ oldpage = phy_save_page(dev);
+ if (oldpage < 0)
+ goto err;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Store the device address for the magic packet */
+ rtl821x_write_page(dev, RTL8211F_PHYSICAL_ADDR_PAGE);
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD0, mac_addr[1] << 8 | (mac_addr[0]));
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD1, mac_addr[3] << 8 | (mac_addr[2]));
+ __phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD2, mac_addr[5] << 8 | (mac_addr[4]));
+
+ /* Enable magic packet matching and reset WOL status */
+ rtl821x_write_page(dev, RTL8211F_WOL_SETTINGS_PAGE);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, RTL8211F_WOL_EVENT_MAGIC);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_STATUS, RTL8211F_WOL_STATUS_RESET);
+
+ /* Enable the WOL interrupt */
+ rtl821x_write_page(dev, RTL8211F_INTBCR_PAGE);
+ __phy_set_bits(dev, RTL8211F_INTBCR, RTL8211F_INTBCR_INTB_PMEB);
+ } else {
+ /* Disable the WOL interrupt */
+ rtl821x_write_page(dev, RTL8211F_INTBCR_PAGE);
+ __phy_clear_bits(dev, RTL8211F_INTBCR, RTL8211F_INTBCR_INTB_PMEB);
+
+ /* Disable magic packet matching and reset WOL status */
+ rtl821x_write_page(dev, RTL8211F_WOL_SETTINGS_PAGE);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, 0);
+ __phy_write(dev, RTL8211F_WOL_SETTINGS_STATUS, RTL8211F_WOL_STATUS_RESET);
+ }
+
+err:
+ return phy_restore_page(dev, oldpage, 0);
+}
+
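The physical-address registers take the MAC as three little-endian 16-bit words, which is why each write above packs the lower-numbered byte into the low half. A standalone check of that packing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	/* Same packing as rtl8211f_set_wol(): low byte in bits 7:0 */
	uint16_t w0 = mac[1] << 8 | mac[0];
	uint16_t w1 = mac[3] << 8 | mac[2];
	uint16_t w2 = mac[5] << 8 | mac[4];

	/* Prints 0x1100 0x3322 0x5544 for 00:11:22:33:44:55 */
	printf("%#06x %#06x %#06x\n", w0, w1, w2);
	return 0;
}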
static int rtl8211_config_aneg(struct phy_device *phydev)
{
int ret;
@@ -390,7 +517,7 @@ static int rtl8211f_config_init(struct phy_device *phydev)
u16 val_txdly, val_rxdly;
int ret;
- ret = phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1,
+ ret = phy_modify_paged_changed(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR1,
RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF,
priv->phycr1);
if (ret < 0) {
@@ -424,7 +551,8 @@ static int rtl8211f_config_init(struct phy_device *phydev)
return 0;
}
- ret = phy_modify_paged_changed(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY,
+ ret = phy_modify_paged_changed(phydev, RTL8211F_RGMII_PAGE,
+ RTL8211F_TXCR, RTL8211F_TX_DELAY,
val_txdly);
if (ret < 0) {
dev_err(dev, "Failed to update the TX delay register\n");
@@ -439,7 +567,8 @@ static int rtl8211f_config_init(struct phy_device *phydev)
str_enabled_disabled(val_txdly));
}
- ret = phy_modify_paged_changed(phydev, 0xd08, 0x15, RTL8211F_RX_DELAY,
+ ret = phy_modify_paged_changed(phydev, RTL8211F_RGMII_PAGE,
+ RTL8211F_RXCR, RTL8211F_RX_DELAY,
val_rxdly);
if (ret < 0) {
dev_err(dev, "Failed to update the RX delay register\n");
@@ -455,14 +584,15 @@ static int rtl8211f_config_init(struct phy_device *phydev)
}
/* Disable PHY-mode EEE so LPI is passed to the MAC */
- ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2,
RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
if (ret)
return ret;
if (priv->has_phycr2) {
- ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
- RTL8211F_CLKOUT_EN, priv->phycr2);
+ ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE,
+ RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN,
+ priv->phycr2);
if (ret < 0) {
dev_err(dev, "clkout configuration failed: %pe\n",
ERR_PTR(ret));
@@ -509,7 +639,7 @@ static int rtl821x_resume(struct phy_device *phydev)
return 0;
}
-static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index,
+static int rtl8211x_led_hw_is_supported(struct phy_device *phydev, u8 index,
unsigned long rules)
{
const unsigned long mask = BIT(TRIGGER_NETDEV_LINK_10) |
@@ -528,9 +658,11 @@ static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index,
* rates and Active indication always at all three 10+100+1000
* link rates.
* This code currently uses mode B only.
+ *
+ * The RTL8211E PHY LED has a single mode, which works like RTL8211F mode B.
*/
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
/* Filter out any other unsupported triggers. */
@@ -549,7 +681,7 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
{
int val;
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
val = phy_read_paged(phydev, 0xd04, RTL8211F_LEDCR);
@@ -560,17 +692,17 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
val &= RTL8211F_LEDCR_MASK;
if (val & RTL8211F_LEDCR_LINK_10)
- set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
if (val & RTL8211F_LEDCR_LINK_100)
- set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
if (val & RTL8211F_LEDCR_LINK_1000)
- set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
if (val & RTL8211F_LEDCR_ACT_TXRX) {
- set_bit(TRIGGER_NETDEV_RX, rules);
- set_bit(TRIGGER_NETDEV_TX, rules);
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+ __set_bit(TRIGGER_NETDEV_TX, rules);
}
return 0;
@@ -582,7 +714,7 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
const u16 mask = RTL8211F_LEDCR_MASK << (RTL8211F_LEDCR_SHIFT * index);
u16 reg = 0;
- if (index >= RTL8211F_LED_COUNT)
+ if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
@@ -605,9 +737,86 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
return phy_modify_paged(phydev, 0xd04, RTL8211F_LEDCR, mask, reg);
}
+static int rtl8211e_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int ret;
+ u16 cr1, cr2;
+
+ if (index >= RTL8211x_LED_COUNT)
+ return -EINVAL;
+
+ ret = rtl821x_read_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR1);
+ if (ret < 0)
+ return ret;
+
+ cr1 = ret >> RTL8211E_LEDCR1_SHIFT * index;
+ if (cr1 & RTL8211E_LEDCR1_ACT_TXRX) {
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+ __set_bit(TRIGGER_NETDEV_TX, rules);
+ }
+
+ ret = rtl821x_read_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR2);
+ if (ret < 0)
+ return ret;
+
+ cr2 = ret >> RTL8211E_LEDCR2_SHIFT * index;
+ if (cr2 & RTL8211E_LEDCR2_LINK_10)
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
+
+ if (cr2 & RTL8211E_LEDCR2_LINK_100)
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
+
+ if (cr2 & RTL8211E_LEDCR2_LINK_1000)
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+	return 0;
+}
+
+static int rtl8211e_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ const u16 cr1mask =
+ RTL8211E_LEDCR1_MASK << (RTL8211E_LEDCR1_SHIFT * index);
+ const u16 cr2mask =
+ RTL8211E_LEDCR2_MASK << (RTL8211E_LEDCR2_SHIFT * index);
+ u16 cr1 = 0, cr2 = 0;
+ int ret;
+
+ if (index >= RTL8211x_LED_COUNT)
+ return -EINVAL;
+
+ if (test_bit(TRIGGER_NETDEV_RX, &rules) ||
+ test_bit(TRIGGER_NETDEV_TX, &rules)) {
+ cr1 |= RTL8211E_LEDCR1_ACT_TXRX;
+ }
+
+ cr1 <<= RTL8211E_LEDCR1_SHIFT * index;
+ ret = rtl821x_modify_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR1, cr1mask, cr1);
+ if (ret < 0)
+ return ret;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ cr2 |= RTL8211E_LEDCR2_LINK_10;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ cr2 |= RTL8211E_LEDCR2_LINK_100;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ cr2 |= RTL8211E_LEDCR2_LINK_1000;
+
+ cr2 <<= RTL8211E_LEDCR2_SHIFT * index;
+ ret = rtl821x_modify_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
+ RTL8211E_LEDCR2, cr2mask, cr2);
+
+ return ret;
+}
+
static int rtl8211e_config_init(struct phy_device *phydev)
{
- int ret = 0, oldpage;
u16 val;
/* enable TX/RX delay for rgmii-* modes, and disable them for rgmii. */
@@ -637,20 +846,9 @@ static int rtl8211e_config_init(struct phy_device *phydev)
* 12 = RX Delay, 11 = TX Delay
* 10:0 = Test && debug settings reserved by realtek
*/
- oldpage = phy_select_page(phydev, 0x7);
- if (oldpage < 0)
- goto err_restore_page;
-
- ret = __phy_write(phydev, RTL821x_EXT_PAGE_SELECT, 0xa4);
- if (ret)
- goto err_restore_page;
-
- ret = __phy_modify(phydev, 0x1c, RTL8211E_CTRL_DELAY
- | RTL8211E_TX_DELAY | RTL8211E_RX_DELAY,
- val);
-
-err_restore_page:
- return phy_restore_page(phydev, oldpage, ret);
+ return rtl821x_modify_ext_page(phydev, RTL8211E_RGMII_EXT_PAGE,
+ RTL8211E_RGMII_DELAY,
+ RTL8211E_DELAY_MASK, val);
}
static int rtl8211b_suspend(struct phy_device *phydev)
@@ -1117,13 +1315,15 @@ static bool rtlgen_supports_mmd(struct phy_device *phydev)
return val > 0;
}
-static int rtlgen_match_phy_device(struct phy_device *phydev)
+static int rtlgen_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_GENERIC_PHYID &&
!rtlgen_supports_2_5gbps(phydev);
}
-static int rtl8226_match_phy_device(struct phy_device *phydev)
+static int rtl8226_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_GENERIC_PHYID &&
rtlgen_supports_2_5gbps(phydev) &&
@@ -1139,32 +1339,38 @@ static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id,
return !is_c45 && (id == phydev->phy_id);
}
-static int rtl8221b_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev);
}
-static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
}
-static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
}
-static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
}
-static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
}
-static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
+static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
if (phydev->is_c45)
return false;
@@ -1173,6 +1379,7 @@ static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
case RTL_GENERIC_PHYID:
case RTL_8221B:
case RTL_8251B:
+ case RTL_8261C:
case 0x001cc841:
break;
default:
@@ -1182,7 +1389,8 @@ static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev)
return rtlgen_supports_2_5gbps(phydev) && !rtlgen_supports_mmd(phydev);
}
-static int rtl8251b_c45_match_phy_device(struct phy_device *phydev)
+static int rtl8251b_c45_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return rtlgen_is_c45_match(phydev, RTL_8251B, true);
}
@@ -1392,6 +1600,9 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ .led_hw_is_supported = rtl8211x_led_hw_is_supported,
+ .led_hw_control_get = rtl8211e_led_hw_control_get,
+ .led_hw_control_set = rtl8211e_led_hw_control_set,
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
@@ -1400,12 +1611,14 @@ static struct phy_driver realtek_drvs[] = {
.read_status = rtlgen_read_status,
.config_intr = &rtl8211f_config_intr,
.handle_interrupt = rtl8211f_handle_interrupt,
+ .set_wol = rtl8211f_set_wol,
+ .get_wol = rtl8211f_get_wol,
.suspend = rtl821x_suspend,
.resume = rtl821x_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.flags = PHY_ALWAYS_CALL_SUSPEND,
- .led_hw_is_supported = rtl8211f_led_hw_is_supported,
+ .led_hw_is_supported = rtl8211x_led_hw_is_supported,
.led_hw_control_get = rtl8211f_led_hw_control_get,
.led_hw_control_set = rtl8211f_led_hw_control_set,
}, {
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 752d4bf7bb99..46c5ff7d7b56 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -67,7 +67,8 @@ static int teranetics_read_status(struct phy_device *phydev)
return 0;
}
-static int teranetics_match_phy_device(struct phy_device *phydev)
+static int teranetics_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
{
return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 53463767cc43..def84e87e05b 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1131,6 +1131,8 @@ static const struct file_operations ppp_device_fops = {
.llseek = noop_llseek,
};
+static void ppp_nl_dellink(struct net_device *dev, struct list_head *head);
+
static __net_init int ppp_init_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
@@ -1146,28 +1148,20 @@ static __net_init int ppp_init_net(struct net *net)
return 0;
}
-static __net_exit void ppp_exit_net(struct net *net)
+static __net_exit void ppp_exit_rtnl_net(struct net *net,
+ struct list_head *dev_to_kill)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
- struct net_device *dev;
- struct net_device *aux;
struct ppp *ppp;
- LIST_HEAD(list);
int id;
- rtnl_lock();
- for_each_netdev_safe(net, dev, aux) {
- if (dev->netdev_ops == &ppp_netdev_ops)
- unregister_netdevice_queue(dev, &list);
- }
-
idr_for_each_entry(&pn->units_idr, ppp, id)
- /* Skip devices already unregistered by previous loop */
- if (!net_eq(dev_net(ppp->dev), net))
- unregister_netdevice_queue(ppp->dev, &list);
+ ppp_nl_dellink(ppp->dev, dev_to_kill);
+}
- unregister_netdevice_many(&list);
- rtnl_unlock();
+static __net_exit void ppp_exit_net(struct net *net)
+{
+ struct ppp_net *pn = net_generic(net, ppp_net_id);
mutex_destroy(&pn->all_ppp_mutex);
idr_destroy(&pn->units_idr);
@@ -1177,6 +1171,7 @@ static __net_exit void ppp_exit_net(struct net *net)
static struct pernet_operations ppp_net_ops = {
.init = ppp_init_net,
+ .exit_rtnl = ppp_exit_rtnl_net,
.exit = ppp_exit_net,
.id = &ppp_net_id,
.size = sizeof(struct ppp_net),
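The new ->exit_rtnl hook runs while the core already holds rtnl_lock and batches every device queued on dev_to_kill into a single unregister_netdevice_many() call, which is why the driver-side lock/loop/unregister sequence above collapses into one dellink per unit. A minimal sketch of the shape, with hypothetical foo_* names:

static void __net_exit foo_exit_rtnl_net(struct net *net,
					 struct list_head *dev_to_kill)
{
	struct foo_net *fn = net_generic(net, foo_net_id);
	struct foo_dev *fdev;

	/* Called under rtnl_lock; the core unregisters the queued devices */
	list_for_each_entry(fdev, &fn->dev_list, list)
		unregister_netdevice_queue(fdev->ndev, dev_to_kill);
}

static struct pernet_operations foo_net_ops = {
	.init      = foo_init_net,	/* hypothetical */
	.exit_rtnl = foo_exit_rtnl_net,	/* rtnl-batched teardown */
	.exit      = foo_exit_net,	/* remaining non-rtnl cleanup */
	.id        = &foo_net_id,
	.size      = sizeof(struct foo_net),
};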
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index d4ece538f1b2..bdf0788d8e66 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -923,7 +923,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
unsigned int __user *up = argp;
unsigned short u;
int __user *sp = argp;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
int s;
int ret;
@@ -1000,16 +1000,17 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
return -ENOLINK;
}
ret = 0;
- dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
+ dev_get_mac_address((struct sockaddr *)&ss, dev_net(tap->dev),
+ tap->dev->name);
if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
- copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
+ copy_to_user(&ifr->ifr_hwaddr, &ss, sizeof(ifr->ifr_hwaddr)))
ret = -EFAULT;
tap_put_tap_dev(tap);
rtnl_unlock();
return ret;
case SIOCSIFHWADDR:
- if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+ if (copy_from_user(&ss, &ifr->ifr_hwaddr, sizeof(ifr->ifr_hwaddr)))
return -EFAULT;
rtnl_lock();
tap = tap_get_tap_dev(q);
@@ -1017,7 +1018,10 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
rtnl_unlock();
return -ENOLINK;
}
- ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
+ if (tap->dev->addr_len > sizeof(ifr->ifr_hwaddr))
+ ret = -EINVAL;
+ else
+ ret = dev_set_mac_address_user(tap->dev, &ss, NULL);
tap_put_tap_dev(tap);
rtnl_unlock();
return ret;
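The switch away from struct sockaddr matters because sa_data holds only 14 bytes, while dev->addr_len can be as large as MAX_ADDR_LEN (32); InfiniBand, for example, uses 20-byte hardware addresses. The explicit addr_len check plus the larger sockaddr_storage therefore close a potential overflow. A standalone illustration of the size gap (MAX_ADDR_LEN restates the kernel's value as an assumption):

#include <stdio.h>
#include <sys/socket.h>

#define MAX_ADDR_LEN 32	/* assumed: matches include/linux/netdevice.h */

int main(void)
{
	/* sa_data offers 14 bytes; sockaddr_storage is 128 bytes total */
	printf("sizeof(struct sockaddr)         = %zu\n",
	       sizeof(struct sockaddr));
	printf("sizeof(struct sockaddr_storage) = %zu (covers MAX_ADDR_LEN %d)\n",
	       sizeof(struct sockaddr_storage), MAX_ADDR_LEN);
	return 0;
}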
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index d8fc0c79745d..8bc56186b2a3 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -55,7 +55,7 @@ static int __set_port_dev_addr(struct net_device *port_dev,
memcpy(addr.__data, dev_addr, port_dev->addr_len);
addr.ss_family = port_dev->type;
- return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
+ return dev_set_mac_address(port_dev, &addr, NULL);
}
static int team_port_set_orig_dev_addr(struct team_port *port)
@@ -1778,8 +1778,8 @@ static void team_change_rx_flags(struct net_device *dev, int change)
struct team_port *port;
int inc;
- rcu_read_lock();
- list_for_each_entry_rcu(port, &team->port_list, list) {
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
if (change & IFF_PROMISC) {
inc = dev->flags & IFF_PROMISC ? 1 : -1;
dev_set_promiscuity(port->dev, inc);
@@ -1789,7 +1789,7 @@ static void team_change_rx_flags(struct net_device *dev, int change)
dev_set_allmulti(port->dev, inc);
}
}
- rcu_read_unlock();
+ mutex_unlock(&team->lock);
}
static void team_set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7babd1e9a378..1207196cbbed 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -3193,7 +3193,13 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case SIOCSIFHWADDR:
/* Set hw address */
- ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
+ if (tun->dev->addr_len > sizeof(ifr.ifr_hwaddr)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = dev_set_mac_address_user(tun->dev,
+ (struct sockaddr_storage *)&ifr.ifr_hwaddr,
+ NULL);
break;
case TUNGETSNDBUF:
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3c360d4f0635..370b32fc2588 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -101,9 +101,7 @@ config USB_RTL8152
select MII
select PHYLIB
select CRC32
- select CRYPTO
- select CRYPTO_HASH
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA256
help
This option adds support for Realtek RTL8152 based USB 2.0
10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index ff5be2cbf17b..453a2cf82753 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -30,10 +30,13 @@ static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely(ret < size)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net,
"Failed to read(0x%x) reg index 0x%04x: %d\n",
cmd, index, ret);
+ }
return ret;
}
@@ -46,10 +49,13 @@ static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, data, size);
- if (unlikely(ret < 0))
+ if (unlikely(ret < size)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net,
"Failed to read(0x%x) reg index 0x%04x: %d\n",
cmd, index, ret);
+ }
return ret;
}
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 74162190bccc..8531b804021a 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -224,7 +224,6 @@ int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm);
u16 asix_read_medium_status(struct usbnet *dev, int in_pm);
int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm);
-void asix_adjust_link(struct net_device *netdev);
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 72ffc89b477a..7fd763917ae2 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -414,28 +414,6 @@ int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
return ret;
}
-/* set MAC link settings according to information from phylib */
-void asix_adjust_link(struct net_device *netdev)
-{
- struct phy_device *phydev = netdev->phydev;
- struct usbnet *dev = netdev_priv(netdev);
- u16 mode = 0;
-
- if (phydev->link) {
- mode = AX88772_MEDIUM_DEFAULT;
-
- if (phydev->duplex == DUPLEX_HALF)
- mode &= ~AX_MEDIUM_FD;
-
- if (phydev->speed != SPEED_100)
- mode &= ~AX_MEDIUM_PS;
- }
-
- asix_write_medium_mode(dev, mode, 0);
- phy_print_status(phydev);
- usbnet_link_change(dev, phydev->link, 0);
-}
-
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
{
int ret;
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index da24941a6e44..9b0318fb50b5 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -752,7 +752,6 @@ static void ax88772_mac_link_down(struct phylink_config *config,
struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
asix_write_medium_mode(dev, 0, 0);
- usbnet_link_change(dev, false, false);
}
static void ax88772_mac_link_up(struct phylink_config *config,
@@ -783,7 +782,6 @@ static void ax88772_mac_link_up(struct phylink_config *config,
m |= AX_MEDIUM_RFC;
asix_write_medium_mode(dev, m, 0);
- usbnet_link_change(dev, true, false);
}
static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
@@ -1350,10 +1348,9 @@ static const struct driver_info ax88772_info = {
.description = "ASIX AX88772 USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
@@ -1362,11 +1359,9 @@ static const struct driver_info ax88772b_info = {
.description = "ASIX AX88772B USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
@@ -1376,11 +1371,9 @@ static const struct driver_info lxausb_t1l_info = {
.description = "Linux Automation GmbH USB 10Base-T1L",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
.stop = ax88772_stop,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
@@ -1412,10 +1405,8 @@ static const struct driver_info hg20f9_info = {
.description = "HG20F9 USB 2.0 Ethernet",
.bind = ax88772_bind,
.unbind = ax88772_unbind,
- .status = asix_status,
.reset = ax88772_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
- FLAG_MULTI_PACKET,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = FLAG_EEPROM_MAC,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index e4f1663b6204..759dab980a09 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1554,10 +1554,12 @@ static void lan78xx_set_multicast(struct net_device *netdev)
schedule_work(&pdata->set_multicast);
}
+static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
+ bool tx_pause, bool rx_pause);
+
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
u16 lcladv, u16 rmtadv)
{
- u32 flow = 0, fct_flow = 0;
u8 cap;
if (dev->fc_autoneg)
@@ -1565,27 +1567,13 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
else
cap = dev->fc_request_control;
- if (cap & FLOW_CTRL_TX)
- flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
-
- if (cap & FLOW_CTRL_RX)
- flow |= FLOW_CR_RX_FCEN_;
-
- if (dev->udev->speed == USB_SPEED_SUPER)
- fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
- else if (dev->udev->speed == USB_SPEED_HIGH)
- fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
-
netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
- lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
-
- /* threshold value should be set before enabling flow */
- lan78xx_write_reg(dev, FLOW, flow);
-
- return 0;
+ return lan78xx_configure_flowcontrol(dev,
+ cap & FLOW_CTRL_TX,
+ cap & FLOW_CTRL_RX);
}
static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
@@ -1636,15 +1624,30 @@ exit_unlock:
return ret;
}
+/**
+ * lan78xx_phy_int_ack - Acknowledge PHY interrupt
+ * @dev: pointer to the LAN78xx device structure
+ *
+ * This function acknowledges the PHY interrupt by setting the
+ * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
+{
+ return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+}
+
+static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed);
+
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct phy_device *phydev = dev->net->phydev;
struct ethtool_link_ksettings ecmd;
int ladv, radv, ret, link;
- u32 buf;
/* clear LAN78xx interrupt status */
- ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+ ret = lan78xx_phy_int_ack(dev);
if (unlikely(ret < 0))
return ret;
@@ -1667,36 +1670,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
phy_ethtool_ksettings_get(phydev, &ecmd);
- if (dev->udev->speed == USB_SPEED_SUPER) {
- if (ecmd.base.speed == 1000) {
- /* disable U2 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- /* enable U1 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf |= USB_CFG1_DEV_U1_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- } else {
- /* enable U1 & U2 */
- ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
- if (ret < 0)
- return ret;
- buf |= USB_CFG1_DEV_U2_INIT_EN_;
- buf |= USB_CFG1_DEV_U1_INIT_EN_;
- ret = lan78xx_write_reg(dev, USB_CFG1, buf);
- if (ret < 0)
- return ret;
- }
- }
+ ret = lan78xx_configure_usb(dev, ecmd.base.speed);
+ if (ret < 0)
+ return ret;
ladv = phy_read(phydev, MII_ADVERTISE);
if (ladv < 0)
@@ -2456,14 +2432,11 @@ static struct irq_chip lan78xx_irqchip = {
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
- struct device_node *of_node;
struct irq_domain *irqdomain;
unsigned int irqmap = 0;
u32 buf;
int ret = 0;
- of_node = dev->udev->dev.parent->of_node;
-
mutex_init(&dev->domain_data.irq_lock);
ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
@@ -2475,8 +2448,10 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqchip = &lan78xx_irqchip;
dev->domain_data.irq_handler = handle_simple_irq;
- irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
- &chip_domain_ops, &dev->domain_data);
+ irqdomain = irq_domain_create_simple(of_fwnode_handle(dev->udev->dev.parent->of_node),
+ MAX_INT_EP, 0,
+ &chip_domain_ops,
+ &dev->domain_data);
if (irqdomain) {
/* create mapping for PHY interrupt */
irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
@@ -2508,48 +2483,323 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqdomain = NULL;
}
-static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
+/**
+ * lan78xx_configure_usb - Configure USB link power settings
+ * @dev: pointer to the LAN78xx device structure
+ * @speed: negotiated Ethernet link speed (in Mbps)
+ *
+ * This function configures U1/U2 link power management for SuperSpeed
+ * USB devices based on the current Ethernet link speed. It uses the
+ * USB_CFG1 register to enable or disable U1 and U2 low-power states.
+ *
+ * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
+ * LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
{
- u32 buf;
+ u32 mask, val;
int ret;
+
+ /* Only configure USB settings for SuperSpeed devices */
+ if (dev->udev->speed != USB_SPEED_SUPER)
+ return 0;
+
+ /* LAN7850 does not support USB 3.x */
+ if (dev->chipid == ID_REV_CHIP_ID_7850_) {
+ netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
+ return 0;
+ }
+
+ switch (speed) {
+ case SPEED_1000:
+ /* Disable U2, enable U1 */
+ ret = lan78xx_update_reg(dev, USB_CFG1,
+ USB_CFG1_DEV_U2_INIT_EN_, 0);
+ if (ret < 0)
+ return ret;
+
+ return lan78xx_update_reg(dev, USB_CFG1,
+ USB_CFG1_DEV_U1_INIT_EN_,
+ USB_CFG1_DEV_U1_INIT_EN_);
+
+ case SPEED_100:
+ case SPEED_10:
+ /* Enable both U1 and U2 */
+ mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
+ val = mask;
+ return lan78xx_update_reg(dev, USB_CFG1, mask, val);
+
+ default:
+ netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
+ return -EINVAL;
+ }
+}
+
+/**
+ * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
+ * @dev: pointer to the LAN78xx device structure
+ * @tx_pause: enable transmission of pause frames
+ * @rx_pause: enable reception of pause frames
+ *
+ * This function configures the LAN78xx flow control settings by writing
+ * to the FLOW and FCT_FLOW registers. The pause time is set to the
+ * maximum allowed value (65535 quanta). FIFO thresholds are selected
+ * based on USB speed.
+ *
+ * The Pause Time field is measured in units of 512-bit times (quanta):
+ * - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
+ * - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
+ * - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
+ *
+ * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
+ * - RXUSED is the number of bytes used in the RX FIFO
+ * - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
+ * - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
+ * - Both thresholds are encoded in units of 512 bytes (rounded up)
+ *
+ * Thresholds differ by USB speed because available USB bandwidth
+ * affects how fast packets can be drained from the RX FIFO:
+ * - USB 3.x (SuperSpeed):
+ * FLOW_ON = 9216 bytes → 18 units
+ * FLOW_OFF = 4096 bytes → 8 units
+ * - USB 2.0 (High-Speed):
+ * FLOW_ON = 8704 bytes → 17 units
+ * FLOW_OFF = 1024 bytes → 2 units
+ *
+ * Note: The FCT_FLOW register must be configured before enabling TX pause
+ * (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
+ bool tx_pause, bool rx_pause)
+{
+ /* Use maximum pause time: 65535 quanta (512-bit times) */
+ const u32 pause_time_quanta = 65535;
+ u32 fct_flow = 0;
+ u32 flow = 0;
+ int ret;
+
+ /* Prepare MAC flow control bits */
+ if (tx_pause)
+ flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
+
+ if (rx_pause)
+ flow |= FLOW_CR_RX_FCEN_;
+
+ /* Select RX FIFO thresholds based on USB speed
+ *
+ * FCT_FLOW layout:
+ * bits [6:0] FLOW_ON threshold (RXUSED ≥ ON → assert pause)
+ * bits [14:8] FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
+ * thresholds are expressed in units of 512 bytes
+ */
+ switch (dev->udev->speed) {
+ case USB_SPEED_SUPER:
+ fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
+ break;
+ case USB_SPEED_HIGH:
+ fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
+ break;
+ default:
+ netdev_warn(dev->net, "Unsupported USB speed: %d\n",
+ dev->udev->speed);
+ return -EINVAL;
+ }
+
+ /* Step 1: Write FIFO thresholds before enabling pause frames */
+ ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+ if (ret < 0)
+ return ret;
+
+ /* Step 2: Enable MAC pause functionality */
+ return lan78xx_write_reg(dev, FLOW, flow);
+}
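The quanta-to-time figures in the comment follow from one pause quantum being 512 bit times at the negotiated link rate; a standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	const double quanta = 65535.0;		/* max Pause Time value */
	const long rates_mbps[] = { 10, 100, 1000 };

	for (int i = 0; i < 3; i++) {
		/* one quantum = 512 bit times -> 512 / rate microseconds */
		double quantum_us = 512.0 / rates_mbps[i];

		printf("%4ld Mbps: quantum %6.2f us, max pause %8.1f ms\n",
		       rates_mbps[i], quantum_us,
		       quanta * quantum_us / 1000.0);
	}
	return 0;
}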
+
+/**
+ * lan78xx_register_fixed_phy() - Register a fallback fixed PHY
+ * @dev: LAN78xx device
+ *
+ * Registers a fixed PHY with 1 Gbps full duplex. This is used in special cases
+ * like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface to a
+ * switch without a visible PHY.
+ *
+ * Return: pointer to the registered fixed PHY, or ERR_PTR() on error.
+ */
+static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev)
+{
struct fixed_phy_status fphy_status = {
.link = 1,
.speed = SPEED_1000,
.duplex = DUPLEX_FULL,
};
+
+ netdev_info(dev->net,
+ "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n");
+
+ return fixed_phy_register(&fphy_status, NULL);
+}
+
+/**
+ * lan78xx_get_phy() - Probe or register PHY device and set interface mode
+ * @dev: LAN78xx device structure
+ *
+ * This function attempts to find a PHY on the MDIO bus. If no PHY is found
+ * and the chip is LAN7801, it registers a fixed PHY as fallback. It also
+ * sets dev->interface based on chip ID and detected PHY type.
+ *
+ * Return: a valid PHY device pointer, or ERR_PTR() on failure.
+ */
+static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
+{
struct phy_device *phydev;
+ /* Attempt to locate a PHY on the MDIO bus */
phydev = phy_find_first(dev->mdiobus);
- if (!phydev) {
- netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
- phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- netdev_err(dev->net, "No PHY/fixed_PHY found\n");
- return NULL;
+
+ switch (dev->chipid) {
+ case ID_REV_CHIP_ID_7801_:
+ if (phydev) {
+ /* External RGMII PHY detected */
+ dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ phydev->is_internal = false;
+
+ if (!phydev->drv)
+ netdev_warn(dev->net,
+ "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
+
+ return phydev;
}
- netdev_dbg(dev->net, "Registered FIXED PHY\n");
+
dev->interface = PHY_INTERFACE_MODE_RGMII;
+		/* No PHY found - fall back to a fixed PHY (e.g. KSZ switch board) */
+ return lan78xx_register_fixed_phy(dev);
+
+ case ID_REV_CHIP_ID_7800_:
+ case ID_REV_CHIP_ID_7850_:
+ if (!phydev)
+ return ERR_PTR(-ENODEV);
+
+		/* These chips use an internal PHY connected via GMII */
+ dev->interface = PHY_INTERFACE_MODE_GMII;
+ phydev->is_internal = true;
+ return phydev;
+
+ default:
+ netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
+ return ERR_PTR(-ENODEV);
+ }
+}
+
+/**
+ * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
+ * @dev: LAN78xx device
+ *
+ * Configure MAC-side registers according to dev->interface, which should be
+ * set by lan78xx_get_phy().
+ *
+ * - For PHY_INTERFACE_MODE_RGMII:
+ * Enable MAC-side TXC delay. This mode seems to be used in a special setup
+ * without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
+ * connected to the KSZ9897 switch, and the link timing is expected to be
+ * hardwired (e.g. via strapping or board layout). No devicetree support is
+ * assumed here.
+ *
+ * - For PHY_INTERFACE_MODE_RGMII_ID:
+ * Disable MAC-side delay and rely on the PHY driver to provide delay.
+ *
+ * - For GMII, no MAC-specific config is needed.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
+{
+ int ret;
+
+ switch (dev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ /* Enable MAC-side TX clock delay */
ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
MAC_RGMII_ID_TXC_DELAY_EN_);
+ if (ret < 0)
+ return ret;
+
ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
- ret = lan78xx_read_reg(dev, HW_CFG, &buf);
- buf |= HW_CFG_CLK125_EN_;
- buf |= HW_CFG_REFCLK25_EN_;
- ret = lan78xx_write_reg(dev, HW_CFG, buf);
- } else {
- if (!phydev->drv) {
- netdev_err(dev->net, "no PHY driver found\n");
- return NULL;
- }
- dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
- /* The PHY driver is responsible to configure proper RGMII
- * interface delays. Disable RGMII delays on MAC side.
- */
- lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_update_reg(dev, HW_CFG,
+ HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
+ HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
+ if (ret < 0)
+ return ret;
+
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* Disable MAC-side TXC delay, PHY provides it */
+ ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
+ if (ret < 0)
+ return ret;
+
+ break;
- phydev->is_internal = false;
+ case PHY_INTERFACE_MODE_GMII:
+ /* No MAC-specific configuration required */
+ break;
+
+ default:
+ netdev_warn(dev->net, "Unsupported interface mode: %d\n",
+ dev->interface);
+ break;
}
- return phydev;
+
+ return 0;
+}
+
+/**
+ * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
+ * @dev: LAN78xx device
+ * @phydev: PHY device (must be valid)
+ *
+ * Reads "microchip,led-modes" property from the PHY's DT node and enables
+ * the corresponding number of LEDs by writing to HW_CFG.
+ *
+ * This helper preserves the original logic, enabling up to 4 LEDs.
+ * If the property is not present, this function does nothing.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
+ struct phy_device *phydev)
+{
+ struct device_node *np = phydev->mdio.dev.of_node;
+ u32 reg;
+ int len, ret;
+
+ if (!np)
+ return 0;
+
+ len = of_property_count_elems_of_size(np, "microchip,led-modes",
+ sizeof(u32));
+ if (len < 0)
+ return 0;
+
+ ret = lan78xx_read_reg(dev, HW_CFG, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
+ HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
+
+ reg |= (len > 0) * HW_CFG_LED0_EN_ |
+ (len > 1) * HW_CFG_LED1_EN_ |
+ (len > 2) * HW_CFG_LED2_EN_ |
+ (len > 3) * HW_CFG_LED3_EN_;
+
+ return lan78xx_write_reg(dev, HW_CFG, reg);
}
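The (len > n) * HW_CFG_LEDn_EN_ form relies on each comparison evaluating to 0 or 1, so a property with len entries enables exactly the first len LEDs. A standalone check with assumed stand-in bit values (the real HW_CFG_LEDx_EN_ positions live in the lan78xx header):

#include <stdio.h>

#define LED0_EN 0x1u	/* assumed stand-ins for HW_CFG_LEDx_EN_ */
#define LED1_EN 0x2u
#define LED2_EN 0x4u
#define LED3_EN 0x8u

int main(void)
{
	int len = 2;	/* two entries in "microchip,led-modes" */
	unsigned int reg = (len > 0) * LED0_EN | (len > 1) * LED1_EN |
			   (len > 2) * LED2_EN | (len > 3) * LED3_EN;

	printf("reg = %#x\n", reg);	/* 0x3: LED0 and LED1 only */
	return 0;
}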
static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2559,30 +2809,13 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
u32 mii_adv;
struct phy_device *phydev;
- switch (dev->chipid) {
- case ID_REV_CHIP_ID_7801_:
- phydev = lan7801_phy_init(dev);
- if (!phydev) {
- netdev_err(dev->net, "lan7801: PHY Init Failed");
- return -EIO;
- }
- break;
-
- case ID_REV_CHIP_ID_7800_:
- case ID_REV_CHIP_ID_7850_:
- phydev = phy_find_first(dev->mdiobus);
- if (!phydev) {
- netdev_err(dev->net, "no PHY found\n");
- return -EIO;
- }
- phydev->is_internal = true;
- dev->interface = PHY_INTERFACE_MODE_GMII;
- break;
+ phydev = lan78xx_get_phy(dev);
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
- default:
- netdev_err(dev->net, "Unknown CHIP ID found\n");
- return -EIO;
- }
+ ret = lan78xx_mac_prepare_for_phy(dev);
+ if (ret < 0)
+ goto free_phy;
/* if phyirq is not set, use polling mode in phylib */
if (dev->domain_data.phyirq > 0)
@@ -2624,33 +2857,23 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
phy_support_eee(phydev);
- if (phydev->mdio.dev.of_node) {
- u32 reg;
- int len;
-
- len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
- "microchip,led-modes",
- sizeof(u32));
- if (len >= 0) {
- /* Ensure the appropriate LEDs are enabled */
- lan78xx_read_reg(dev, HW_CFG, &reg);
- reg &= ~(HW_CFG_LED0_EN_ |
- HW_CFG_LED1_EN_ |
- HW_CFG_LED2_EN_ |
- HW_CFG_LED3_EN_);
- reg |= (len > 0) * HW_CFG_LED0_EN_ |
- (len > 1) * HW_CFG_LED1_EN_ |
- (len > 2) * HW_CFG_LED2_EN_ |
- (len > 3) * HW_CFG_LED3_EN_;
- lan78xx_write_reg(dev, HW_CFG, reg);
- }
- }
+ ret = lan78xx_configure_leds_from_dt(dev, phydev);
+ if (ret)
+ goto free_phy;
genphy_config_aneg(phydev);
dev->fc_autoneg = phydev->autoneg;
return 0;
+
+free_phy:
+ if (phy_is_pseudo_fixed_link(phydev)) {
+ fixed_phy_unregister(phydev);
+ phy_device_free(phydev);
+ }
+
+ return ret;
}
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2cab046749a9..d6589b24c68d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,7 +26,7 @@
#include <linux/atomic.h>
#include <linux/acpi.h>
#include <linux/firmware.h>
-#include <crypto/hash.h>
+#include <crypto/sha2.h>
#include <linux/usb/r8152.h>
#include <net/gso.h>
@@ -1665,14 +1665,14 @@ static int
rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex,
u32 advertising);
-static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
+static int __rtl8152_set_mac_address(struct net_device *netdev,
+ struct sockaddr_storage *addr,
bool in_resume)
{
struct r8152 *tp = netdev_priv(netdev);
- struct sockaddr *addr = p;
int ret = -EADDRNOTAVAIL;
- if (!is_valid_ether_addr(addr->sa_data))
+ if (!is_valid_ether_addr(addr->__data))
goto out1;
if (!in_resume) {
@@ -1683,10 +1683,10 @@ static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
mutex_lock(&tp->control);
- eth_hw_addr_set(netdev, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->__data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
- pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
+ pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->__data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
mutex_unlock(&tp->control);
@@ -1706,7 +1706,8 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
* host system provided MAC address.
* Examples of this are Dell TB15 and Dell WD15 docks
*/
-static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+static int vendor_mac_passthru_addr_read(struct r8152 *tp,
+ struct sockaddr_storage *ss)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -1774,47 +1775,48 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
ret = -EINVAL;
goto amacout;
}
- memcpy(sa->sa_data, buf, 6);
+ memcpy(ss->__data, buf, 6);
tp->netdev->addr_assign_type = NET_ADDR_STOLEN;
netif_info(tp, probe, tp->netdev,
- "Using pass-thru MAC addr %pM\n", sa->sa_data);
+ "Using pass-thru MAC addr %pM\n", ss->__data);
amacout:
kfree(obj);
return ret;
}
-static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
+static int determine_ethernet_addr(struct r8152 *tp,
+ struct sockaddr_storage *ss)
{
struct net_device *dev = tp->netdev;
int ret;
- sa->sa_family = dev->type;
+ ss->ss_family = dev->type;
- ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data);
+ ret = eth_platform_get_mac_address(&tp->udev->dev, ss->__data);
if (ret < 0) {
if (tp->version == RTL_VER_01) {
- ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
+ ret = pla_ocp_read(tp, PLA_IDR, 8, ss->__data);
} else {
/* if device doesn't support MAC pass through this will
* be expected to be non-zero
*/
- ret = vendor_mac_passthru_addr_read(tp, sa);
+ ret = vendor_mac_passthru_addr_read(tp, ss);
if (ret < 0)
ret = pla_ocp_read(tp, PLA_BACKUP, 8,
- sa->sa_data);
+ ss->__data);
}
}
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
- } else if (!is_valid_ether_addr(sa->sa_data)) {
+ } else if (!is_valid_ether_addr(ss->__data)) {
netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
- sa->sa_data);
+ ss->__data);
eth_hw_addr_random(dev);
- ether_addr_copy(sa->sa_data, dev->dev_addr);
+ ether_addr_copy(ss->__data, dev->dev_addr);
netif_info(tp, probe, dev, "Random ether addr %pM\n",
- sa->sa_data);
+ ss->__data);
return 0;
}
@@ -1824,17 +1826,17 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
{
struct net_device *dev = tp->netdev;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
int ret;
- ret = determine_ethernet_addr(tp, &sa);
+ ret = determine_ethernet_addr(tp, &ss);
if (ret < 0)
return ret;
if (tp->version == RTL_VER_01)
- eth_hw_addr_set(dev, sa.sa_data);
+ eth_hw_addr_set(dev, ss.__data);
else
- ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
+ ret = __rtl8152_set_mac_address(dev, &ss, in_resume);
return ret;
}
@@ -4628,48 +4630,16 @@ out:
static long rtl8152_fw_verify_checksum(struct r8152 *tp,
struct fw_header *fw_hdr, size_t size)
{
- unsigned char checksum[sizeof(fw_hdr->checksum)];
- struct crypto_shash *alg;
- struct shash_desc *sdesc;
- size_t len;
- long rc;
-
- alg = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(alg)) {
- rc = PTR_ERR(alg);
- goto out;
- }
-
- if (crypto_shash_digestsize(alg) != sizeof(fw_hdr->checksum)) {
- rc = -EFAULT;
- dev_err(&tp->intf->dev, "digestsize incorrect (%u)\n",
- crypto_shash_digestsize(alg));
- goto free_shash;
- }
+ u8 checksum[sizeof(fw_hdr->checksum)];
- len = sizeof(*sdesc) + crypto_shash_descsize(alg);
- sdesc = kmalloc(len, GFP_KERNEL);
- if (!sdesc) {
- rc = -ENOMEM;
- goto free_shash;
- }
- sdesc->tfm = alg;
-
- len = size - sizeof(fw_hdr->checksum);
- rc = crypto_shash_digest(sdesc, fw_hdr->version, len, checksum);
- kfree(sdesc);
- if (rc)
- goto free_shash;
+ BUILD_BUG_ON(sizeof(checksum) != SHA256_DIGEST_SIZE);
+ sha256(fw_hdr->version, size - sizeof(checksum), checksum);
- if (memcmp(fw_hdr->checksum, checksum, sizeof(fw_hdr->checksum))) {
+ if (memcmp(fw_hdr->checksum, checksum, sizeof(checksum))) {
dev_err(&tp->intf->dev, "checksum fail\n");
- rc = -EFAULT;
+ return -EFAULT;
}
-
-free_shash:
- crypto_free_shash(alg);
-out:
- return rc;
+ return 0;
}
static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw)
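/* Editor's note: the checksum hunk above trades ~40 lines of crypto_shash
 * allocation/digest/teardown for the one-shot sha256() library helper from
 * <crypto/sha2.h>, which cannot fail and needs no allocations. A minimal
 * sketch of that API under the same assumptions (verify_blob() is a
 * hypothetical caller, not part of this patch):
 */
#include <crypto/sha2.h>
#include <linux/string.h>

static bool verify_blob(const u8 *data, size_t len,
			const u8 expected[SHA256_DIGEST_SIZE])
{
	u8 digest[SHA256_DIGEST_SIZE];

	sha256(data, len, digest);	/* one shot: init + update + final */
	return memcmp(digest, expected, SHA256_DIGEST_SIZE) == 0;
}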
@@ -8453,7 +8423,7 @@ static int rtl8152_post_reset(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
- struct sockaddr sa;
+ struct sockaddr_storage ss;
if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
goto exit;
@@ -8461,8 +8431,8 @@ static int rtl8152_post_reset(struct usb_interface *intf)
rtl_set_accessible(tp);
/* reset the MAC address in case of policy change */
- if (determine_ethernet_addr(tp, &sa) >= 0)
- dev_set_mac_address (tp->netdev, &sa, NULL);
+ if (determine_ethernet_addr(tp, &ss) >= 0)
+ dev_set_mac_address(tp->netdev, &ss, NULL);
netdev = tp->netdev;
if (!netif_running(netdev))
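/* Editor's note on the sockaddr -> sockaddr_storage conversion above:
 * struct sockaddr carries only 14 bytes of address payload, while this
 * driver reads and writes 8 bytes at a time around the 6-byte MAC;
 * struct sockaddr_storage is 128 bytes, aligned for any family, and is
 * what dev_set_mac_address() takes in this series. A hedged sketch of
 * the fill pattern (fill_hw_mac() is hypothetical):
 */
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static void fill_hw_mac(struct sockaddr_storage *ss,
			const u8 mac[ETH_ALEN], unsigned short dev_type)
{
	ss->ss_family = dev_type;		/* ARPHRD_ETHER for Ethernet */
	memcpy(ss->__data, mac, ETH_ALEN);	/* 6 bytes, well inside __data */
}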
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 7bb53961c0ea..e58a0f1b5c5b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -307,12 +307,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
- if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
+ if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb)))
+ return NETDEV_TX_BUSY; /* signal qdisc layer */
- return NET_RX_SUCCESS;
+ return NET_RX_SUCCESS; /* same as NETDEV_TX_OK */
}
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
@@ -346,11 +344,11 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
- int ret = NETDEV_TX_OK;
+ struct netdev_queue *txq;
struct net_device *rcv;
int length = skb->len;
bool use_napi = false;
- int rxq;
+ int ret, rxq;
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
@@ -373,17 +371,45 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
- if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+
+ ret = veth_forward_skb(rcv, skb, rq, use_napi);
+ switch (ret) {
+ case NET_RX_SUCCESS: /* same as NETDEV_TX_OK */
if (!use_napi)
dev_sw_netstats_tx_add(dev, 1, length);
else
__veth_xdp_flush(rq);
- } else {
+ break;
+ case NETDEV_TX_BUSY:
+ /* If a qdisc is attached to our virtual device, returning
+ * NETDEV_TX_BUSY is allowed.
+ */
+ txq = netdev_get_tx_queue(dev, rxq);
+
+ if (qdisc_txq_has_no_queue(txq)) {
+ dev_kfree_skb_any(skb);
+ goto drop;
+ }
+ /* Restore Eth hdr pulled by dev_forward_skb/eth_type_trans */
+ __skb_push(skb, ETH_HLEN);
+ /* Rely on prior successful packets having started the NAPI
+ * consumer via __veth_xdp_flush(). Cancel the TXQ stop if the
+ * consumer has stopped, paired with the empty check in
+ * veth_poll().
+ */
+ netif_tx_stop_queue(txq);
+ smp_mb__after_atomic();
+ if (unlikely(__ptr_ring_empty(&rq->xdp_ring)))
+ netif_tx_wake_queue(txq);
+ break;
+ case NET_RX_DROP: /* same as NET_XMIT_DROP */
drop:
atomic64_inc(&priv->dropped);
ret = NET_XMIT_DROP;
+ break;
+ default:
+ net_crit_ratelimited("%s(%s): Invalid return code(%d)",
+ __func__, dev->name, ret);
}
-
rcu_read_unlock();
return ret;
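/* Editor's note: the NETDEV_TX_BUSY branch above is the standard
 * lost-wakeup-safe stop/wake protocol. The producer stops the TXQ,
 * issues smp_mb__after_atomic(), then re-checks the ring; if the
 * consumer drained it inside that window, the producer wakes the queue
 * itself. The consumer side (veth_poll(), next hunk) wakes the peer
 * TXQ after draining, so whichever side loses the race still leaves
 * the queue runnable.
 */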
@@ -874,9 +900,17 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
struct veth_xdp_tx_bq *bq,
struct veth_stats *stats)
{
+ struct veth_priv *priv = netdev_priv(rq->dev);
+ int queue_idx = rq->xdp_rxq.queue_index;
+ struct netdev_queue *peer_txq;
+ struct net_device *peer_dev;
int i, done = 0, n_xdpf = 0;
void *xdpf[VETH_XDP_BATCH];
+ /* NAPI functions as RCU section */
+ peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
+ peer_txq = netdev_get_tx_queue(peer_dev, queue_idx);
+
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
@@ -925,6 +959,9 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
+ if (unlikely(netif_tx_queue_stopped(peer_txq)))
+ netif_tx_wake_queue(peer_txq);
+
return done;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3df6aabc7e33..2440e30c5bd1 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -27,6 +27,10 @@
#include <linux/module.h>
#include <net/ip6_checksum.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
+#endif
+
#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"
@@ -3607,8 +3611,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
- WRITE_ONCE(netdev->mtu, new_mtu);
-
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
@@ -3622,6 +3624,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
/* we need to re-create the rx queue based on the new mtu */
vmxnet3_rq_destroy_all(adapter);
+ WRITE_ONCE(netdev->mtu, new_mtu);
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
@@ -3638,6 +3641,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
"Closing it\n", err);
goto out;
}
+ } else {
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
out:
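/* Editor's note: the point of the reordering above is that the new MTU
 * is published with WRITE_ONCE() only after the RX rings sized for the
 * old MTU are destroyed (or immediately, when the device is down and
 * there are no rings to rebuild), so ring geometry and netdev->mtu can
 * no longer be observed out of step during a resize.
 */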
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7168b33adadb..9a4beea6ee0c 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -343,15 +343,13 @@ unlock:
static bool qdisc_tx_is_default(const struct net_device *dev)
{
struct netdev_queue *txq;
- struct Qdisc *qdisc;
if (dev->num_tx_queues > 1)
return false;
txq = netdev_get_tx_queue(dev, 0);
- qdisc = rcu_access_pointer(txq->qdisc);
- return !qdisc->enqueue;
+ return qdisc_txq_has_no_queue(txq);
}
/* Local traffic destined to local address. Reinsert the packet to rx
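/* Editor's note: qdisc_txq_has_no_queue() is the new helper (shared
 * with the veth hunk above) that replaces the open-coded check removed
 * here; reconstructing from the deleted lines, its body is roughly
 * (sketch name is hypothetical):
 */
static inline bool qdisc_txq_has_no_queue_sketch(const struct netdev_queue *txq)
{
	struct Qdisc *qdisc = rcu_access_pointer(txq->qdisc);

	return qdisc->enqueue == NULL;	/* noqueue: nothing can back up */
}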
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 9ccc3f09f71b..a56d7239b127 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -15,6 +15,7 @@
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
+#include <linux/rhashtable.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/gro.h>
@@ -63,8 +64,12 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan);
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
-/* salt for hash table */
-static u32 vxlan_salt __read_mostly;
+static const struct rhashtable_params vxlan_fdb_rht_params = {
+ .head_offset = offsetof(struct vxlan_fdb, rhnode),
+ .key_offset = offsetof(struct vxlan_fdb, key),
+ .key_len = sizeof(struct vxlan_fdb_key),
+ .automatic_shrinking = true,
+};
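/* Editor's note: with head_offset/key_offset/key_len set as above, the
 * rhashtable core derives both the hash key and the linkage from the
 * entry itself, so the rest of this patch only needs the calls already
 * visible below:
 *
 *	rhashtable_init(&vxlan->fdb_hash_tbl, &vxlan_fdb_rht_params);
 *	rhashtable_lookup_insert_fast(&tbl, &f->rhnode, params);
 *	f = rhashtable_lookup(&tbl, &key, params);	(RCU read side)
 *	rhashtable_remove_fast(&tbl, &f->rhnode, params);
 *	rhashtable_destroy(&tbl);
 *
 * automatic_shrinking lets the table give memory back as FDB entries
 * age out, which the old fixed-size hlist array could not.
 */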
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
@@ -186,7 +191,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
} else if (nh) {
ndm->ndm_family = nh_family;
}
- send_eth = !is_zero_ether_addr(fdb->eth_addr);
+ send_eth = !is_zero_ether_addr(fdb->key.eth_addr);
} else
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_state = fdb->state;
@@ -201,7 +206,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
peernet2id(dev_net(vxlan->dev), vxlan->net)))
goto nla_put_failure;
- if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
+ if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.eth_addr))
goto nla_put_failure;
if (nh) {
if (nla_put_u32(skb, NDA_NH_ID, nh_id))
@@ -223,9 +228,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
goto nla_put_failure;
}
- if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
+ if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->key.vni &&
nla_put_u32(skb, NDA_SRC_VNI,
- be32_to_cpu(fdb->vni)))
+ be32_to_cpu(fdb->key.vni)))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
@@ -293,8 +298,8 @@ static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
fdb_info->remote_port = rd->remote_port;
fdb_info->remote_vni = rd->remote_vni;
fdb_info->remote_ifindex = rd->remote_ifindex;
- memcpy(fdb_info->eth_addr, fdb->eth_addr, ETH_ALEN);
- fdb_info->vni = fdb->vni;
+ memcpy(fdb_info->eth_addr, fdb->key.eth_addr, ETH_ALEN);
+ fdb_info->vni = fdb->key.vni;
fdb_info->offloaded = rd->offloaded;
fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
}
@@ -366,67 +371,42 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
};
struct vxlan_rdst remote = { };
- memcpy(f.eth_addr, eth_addr, ETH_ALEN);
+ memcpy(f.key.eth_addr, eth_addr, ETH_ALEN);
vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL);
}
-/* Hash Ethernet address */
-static u32 eth_hash(const unsigned char *addr)
-{
- u64 value = get_unaligned((u64 *)addr);
-
- /* only want 6 bytes */
-#ifdef __BIG_ENDIAN
- value >>= 16;
-#else
- value <<= 16;
-#endif
- return hash_64(value, FDB_HASH_BITS);
-}
-
-u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
+/* Look up Ethernet address in forwarding table */
+static struct vxlan_fdb *vxlan_find_mac_rcu(struct vxlan_dev *vxlan,
+ const u8 *mac, __be32 vni)
{
- /* use 1 byte of OUI and 3 bytes of NIC */
- u32 key = get_unaligned((u32 *)(addr + 2));
-
- return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
-}
+ struct vxlan_fdb_key key;
-u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
-{
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
- return eth_vni_hash(mac, vni);
+ memset(&key, 0, sizeof(key));
+ memcpy(key.eth_addr, mac, sizeof(key.eth_addr));
+ if (!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA))
+ key.vni = vxlan->default_dst.remote_vni;
else
- return eth_hash(mac);
-}
+ key.vni = vni;
-/* Hash chain to use given mac address */
-static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
- const u8 *mac, __be32 vni)
-{
- return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)];
+ return rhashtable_lookup(&vxlan->fdb_hash_tbl, &key,
+ vxlan_fdb_rht_params);
}
-/* Look up Ethernet address in forwarding table */
-static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
- const u8 *mac, __be32 vni)
+static struct vxlan_fdb *vxlan_find_mac_tx(struct vxlan_dev *vxlan,
+ const u8 *mac, __be32 vni)
{
- struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
struct vxlan_fdb *f;
- hlist_for_each_entry_rcu(f, head, hlist) {
- if (ether_addr_equal(mac, f->eth_addr)) {
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
- if (vni == f->vni)
- return f;
- } else {
- return f;
- }
- }
+ f = vxlan_find_mac_rcu(vxlan, mac, vni);
+ if (f) {
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(f->used) != now)
+ WRITE_ONCE(f->used, now);
}
- return NULL;
+ return f;
}
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
@@ -434,13 +414,11 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
{
struct vxlan_fdb *f;
- f = __vxlan_find_mac(vxlan, mac, vni);
- if (f) {
- unsigned long now = jiffies;
+ lockdep_assert_held_once(&vxlan->hash_lock);
- if (READ_ONCE(f->used) != now)
- WRITE_ONCE(f->used, now);
- }
+ rcu_read_lock();
+ f = vxlan_find_mac_rcu(vxlan, mac, vni);
+ rcu_read_unlock();
return f;
}
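/* Editor's note: after this patch the FDB exposes three lookup flavors
 * with distinct locking contracts:
 *   vxlan_find_mac_rcu() - caller already inside rcu_read_lock(),
 *                          no side effects;
 *   vxlan_find_mac_tx()  - RCU lookup for the TX fast path that also
 *                          refreshes f->used (cheap READ_ONCE() check
 *                          before dirtying the cacheline);
 *   vxlan_find_mac()     - for hash_lock holders; wraps the RCU lookup
 *                          and asserts the lock via lockdep.
 */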
@@ -480,7 +458,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
rcu_read_lock();
- f = __vxlan_find_mac(vxlan, eth_addr, vni);
+ f = vxlan_find_mac_rcu(vxlan, eth_addr, vni);
if (!f) {
rc = -ENOENT;
goto out;
@@ -517,32 +495,28 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- unsigned int h;
int rc = 0;
if (!netif_is_vxlan(dev))
return -EINVAL;
vxlan = netdev_priv(dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
- if (f->vni == vni) {
- list_for_each_entry(rdst, &f->remotes, list) {
- rc = vxlan_fdb_notify_one(nb, vxlan,
- f, rdst,
- extack);
- if (rc)
- goto unlock;
- }
+ spin_lock_bh(&vxlan->hash_lock);
+ hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) {
+ if (f->key.vni == vni) {
+ list_for_each_entry(rdst, &f->remotes, list) {
+ rc = vxlan_fdb_notify_one(nb, vxlan, f, rdst,
+ extack);
+ if (rc)
+ goto unlock;
}
}
- spin_unlock_bh(&vxlan->hash_lock[h]);
}
+ spin_unlock_bh(&vxlan->hash_lock);
return 0;
unlock:
- spin_unlock_bh(&vxlan->hash_lock[h]);
+ spin_unlock_bh(&vxlan->hash_lock);
return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_replay);
@@ -552,20 +526,19 @@ void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
struct vxlan_dev *vxlan;
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- unsigned int h;
if (!netif_is_vxlan(dev))
return;
vxlan = netdev_priv(dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
- if (f->vni == vni)
- list_for_each_entry(rdst, &f->remotes, list)
- rdst->offloaded = false;
- spin_unlock_bh(&vxlan->hash_lock[h]);
+ spin_lock_bh(&vxlan->hash_lock);
+ hlist_for_each_entry(f, &vxlan->fdb_list, fdb_node) {
+ if (f->key.vni == vni) {
+ list_for_each_entry(rdst, &f->remotes, list)
+ rdst->offloaded = false;
+ }
}
+ spin_unlock_bh(&vxlan->hash_lock);
}
EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
@@ -610,10 +583,10 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
if (rd == NULL)
return -ENOMEM;
- if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
- kfree(rd);
- return -ENOMEM;
- }
+ /* The driver can work correctly without a dst cache, so do not treat
+ * dst cache initialization errors as fatal.
+ */
+ dst_cache_init(&rd->dst_cache, GFP_ATOMIC | __GFP_NOWARN);
rd->remote_ip = *ip;
rd->remote_port = port;
@@ -803,27 +776,20 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac,
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return NULL;
+ memset(&f->key, 0, sizeof(f->key));
f->state = state;
f->flags = ndm_flags;
f->updated = f->used = jiffies;
- f->vni = src_vni;
+ f->key.vni = src_vni;
f->nh = NULL;
RCU_INIT_POINTER(f->vdev, vxlan);
INIT_LIST_HEAD(&f->nh_list);
INIT_LIST_HEAD(&f->remotes);
- memcpy(f->eth_addr, mac, ETH_ALEN);
+ memcpy(f->key.eth_addr, mac, ETH_ALEN);
return f;
}
-static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
- __be32 src_vni, struct vxlan_fdb *f)
-{
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
-}
-
static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
u32 nhid, struct netlink_ext_ack *extack)
{
@@ -913,10 +879,27 @@ int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (rc < 0)
goto errout;
+ rc = rhashtable_lookup_insert_fast(&vxlan->fdb_hash_tbl, &f->rhnode,
+ vxlan_fdb_rht_params);
+ if (rc)
+ goto destroy_remote;
+
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->fdb_node, &vxlan->fdb_list);
+
*fdb = f;
return 0;
+destroy_remote:
+ if (rcu_access_pointer(f->nh)) {
+ list_del_rcu(&f->nh_list);
+ nexthop_put(rtnl_dereference(f->nh));
+ } else {
+ list_del(&rd->list);
+ dst_cache_destroy(&rd->dst_cache);
+ kfree(rd);
+ }
errout:
kfree(f);
return rc;
@@ -953,7 +936,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
{
struct vxlan_rdst *rd;
- netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr);
+ netdev_dbg(vxlan->dev, "delete %pM\n", f->key.eth_addr);
--vxlan->addrcnt;
if (do_notify) {
@@ -966,7 +949,9 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
swdev_notify, NULL);
}
- hlist_del_rcu(&f->hlist);
+ hlist_del_init_rcu(&f->fdb_node);
+ rhashtable_remove_fast(&vxlan->fdb_hash_tbl, &f->rhnode,
+ vxlan_fdb_rht_params);
list_del_rcu(&f->nh_list);
call_rcu(&f->rcu, vxlan_fdb_free);
}
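/* Editor's note: hlist_del_init_rcu() (rather than plain
 * hlist_del_rcu()) leaves f->fdb_node self-pointing, which is what
 * allows the lockless walkers later in this patch (vxlan_cleanup(),
 * vxlan_flush(), vxlan_fdb_nh_flush()) to take hash_lock and use
 * hlist_unhashed() to detect an entry a concurrent path already
 * destroyed, before destroying it a second time.
 */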
@@ -1024,8 +1009,8 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
if ((flags & NLM_F_REPLACE)) {
/* Only change unicasts */
- if (!(is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
+ if (!(is_multicast_ether_addr(f->key.eth_addr) ||
+ is_zero_ether_addr(f->key.eth_addr))) {
if (nhid) {
rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack);
if (rc < 0)
@@ -1041,8 +1026,8 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
}
}
if ((flags & NLM_F_APPEND) &&
- (is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
+ (is_multicast_ether_addr(f->key.eth_addr) ||
+ is_zero_ether_addr(f->key.eth_addr))) {
rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
if (rc < 0)
@@ -1101,7 +1086,6 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
if (rc < 0)
return rc;
- vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack);
if (rc)
@@ -1125,7 +1109,7 @@ int vxlan_fdb_update(struct vxlan_dev *vxlan,
{
struct vxlan_fdb *f;
- f = __vxlan_find_mac(vxlan, mac, src_vni);
+ f = vxlan_find_mac(vxlan, mac, src_vni);
if (f) {
if (flags & NLM_F_EXCL) {
netdev_dbg(vxlan->dev,
@@ -1253,7 +1237,6 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
__be16 port;
__be32 src_vni, vni;
u32 ifindex, nhid;
- u32 hash_index;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -1273,13 +1256,12 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
return -EAFNOSUPPORT;
- hash_index = fdb_head_index(vxlan, addr, src_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
nhid, true, extack);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
if (!err)
*notified = true;
@@ -1296,7 +1278,7 @@ int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
int err = -ENOENT;
- f = __vxlan_find_mac(vxlan, addr, src_vni);
+ f = vxlan_find_mac(vxlan, addr, src_vni);
if (!f)
return err;
@@ -1330,7 +1312,6 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
union vxlan_addr ip;
__be32 src_vni, vni;
u32 ifindex, nhid;
- u32 hash_index;
__be16 port;
int err;
@@ -1339,11 +1320,10 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
if (err)
return err;
- hash_index = fdb_head_index(vxlan, addr, src_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
if (!err)
*notified = true;
@@ -1358,52 +1338,46 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
{
struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
struct vxlan_dev *vxlan = netdev_priv(dev);
- unsigned int h;
+ struct vxlan_fdb *f;
int err = 0;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct vxlan_fdb *f;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
- struct vxlan_rdst *rd;
-
- if (rcu_access_pointer(f->nh)) {
- if (*idx < ctx->fdb_idx)
- goto skip_nh;
- err = vxlan_fdb_info(skb, vxlan, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI, NULL);
- if (err < 0) {
- rcu_read_unlock();
- goto out;
- }
-skip_nh:
- *idx += 1;
- continue;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ struct vxlan_rdst *rd;
+
+ if (rcu_access_pointer(f->nh)) {
+ if (*idx < ctx->fdb_idx)
+ goto skip_nh;
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI, NULL);
+ if (err < 0) {
+ rcu_read_unlock();
+ goto out;
}
+skip_nh:
+ *idx += 1;
+ continue;
+ }
- list_for_each_entry_rcu(rd, &f->remotes, list) {
- if (*idx < ctx->fdb_idx)
- goto skip;
-
- err = vxlan_fdb_info(skb, vxlan, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI, rd);
- if (err < 0) {
- rcu_read_unlock();
- goto out;
- }
-skip:
- *idx += 1;
+ list_for_each_entry_rcu(rd, &f->remotes, list) {
+ if (*idx < ctx->fdb_idx)
+ goto skip;
+
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH, NLM_F_MULTI, rd);
+ if (err < 0) {
+ rcu_read_unlock();
+ goto out;
}
+skip:
+ *idx += 1;
}
- rcu_read_unlock();
}
+ rcu_read_unlock();
out:
return err;
}
@@ -1427,7 +1401,7 @@ static int vxlan_fdb_get(struct sk_buff *skb,
rcu_read_lock();
- f = __vxlan_find_mac(vxlan, addr, vni);
+ f = vxlan_find_mac_rcu(vxlan, addr, vni);
if (!f) {
NL_SET_ERR_MSG(extack, "Fdb entry not found");
err = -ENOENT;
@@ -1463,7 +1437,7 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
ifindex = src_ifindex;
#endif
- f = __vxlan_find_mac(vxlan, src_mac, vni);
+ f = vxlan_find_mac_rcu(vxlan, src_mac, vni);
if (likely(f)) {
struct vxlan_rdst *rdst = first_remote_rcu(f);
unsigned long now = jiffies;
@@ -1491,10 +1465,8 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
rdst->remote_ip = *src_ip;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
- u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
-
/* learned new entry */
- spin_lock(&vxlan->hash_lock[hash_index]);
+ spin_lock(&vxlan->hash_lock);
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
@@ -1505,7 +1477,7 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
vni,
vxlan->default_dst.remote_vni,
ifindex, NTF_SELF, 0, true, NULL);
- spin_unlock(&vxlan->hash_lock[hash_index]);
+ spin_unlock(&vxlan->hash_lock);
}
return SKB_NOT_DROPPED_YET;
@@ -1916,12 +1888,15 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
}
- f = vxlan_find_mac(vxlan, n->ha, vni);
+ rcu_read_lock();
+ f = vxlan_find_mac_tx(vxlan, n->ha, vni);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
+ rcu_read_unlock();
goto out;
}
+ rcu_read_unlock();
reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
n->ha, sha);
@@ -2080,7 +2055,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
}
- f = vxlan_find_mac(vxlan, n->ha, vni);
+ f = vxlan_find_mac_tx(vxlan, n->ha, vni);
if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
@@ -2648,14 +2623,10 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
memset(&nh_rdst, 0, sizeof(struct vxlan_rdst));
hash = skb_get_hash(skb);
- rcu_read_lock();
nh = rcu_dereference(f->nh);
- if (!nh) {
- rcu_read_unlock();
+ if (!nh)
goto drop;
- }
do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst);
- rcu_read_unlock();
if (likely(do_xmit))
vxlan_xmit_one(skb, dev, vni, &nh_rdst, did_rsc);
@@ -2782,7 +2753,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
}
eth = eth_hdr(skb);
- f = vxlan_find_mac(vxlan, eth->h_dest, vni);
+ rcu_read_lock();
+ f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni);
did_rsc = false;
if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
@@ -2790,11 +2762,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
if (did_rsc)
- f = vxlan_find_mac(vxlan, eth->h_dest, vni);
+ f = vxlan_find_mac_tx(vxlan, eth->h_dest, vni);
}
if (f == NULL) {
- f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
+ f = vxlan_find_mac_tx(vxlan, all_zeros_mac, vni);
if (f == NULL) {
if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
!is_multicast_ether_addr(eth->h_dest))
@@ -2804,7 +2776,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
- return NETDEV_TX_OK;
+ goto out;
}
}
@@ -2829,6 +2801,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
}
+out:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
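/* Editor's note: vxlan_xmit() now holds a single rcu_read_lock() across
 * the whole FDB fast path (and vxlan_xmit_nh() above drops its private
 * one), because an rhashtable lookup result is only guaranteed to stay
 * alive for the duration of the RCU read-side section it was found
 * under.
 */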
@@ -2837,38 +2811,36 @@ static void vxlan_cleanup(struct timer_list *t)
{
struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
- unsigned int h;
+ struct vxlan_fdb *f;
if (!netif_running(vxlan->dev))
return;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct hlist_node *p, *n;
-
- spin_lock(&vxlan->hash_lock[h]);
- hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
- struct vxlan_fdb *f
- = container_of(p, struct vxlan_fdb, hlist);
- unsigned long timeout;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ unsigned long timeout;
- if (f->state & (NUD_PERMANENT | NUD_NOARP))
- continue;
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
+ continue;
- if (f->flags & NTF_EXT_LEARNED)
- continue;
+ if (f->flags & NTF_EXT_LEARNED)
+ continue;
- timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ;
- if (time_before_eq(timeout, jiffies)) {
- netdev_dbg(vxlan->dev,
- "garbage collect %pM\n",
- f->eth_addr);
+ timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ;
+ if (time_before_eq(timeout, jiffies)) {
+ spin_lock(&vxlan->hash_lock);
+ if (!hlist_unhashed(&f->fdb_node)) {
+ netdev_dbg(vxlan->dev, "garbage collect %pM\n",
+ f->key.eth_addr);
f->state = NUD_STALE;
vxlan_fdb_destroy(vxlan, f, true, true);
- } else if (time_before(timeout, next_timer))
- next_timer = timeout;
+ }
+ spin_unlock(&vxlan->hash_lock);
+ } else if (time_before(timeout, next_timer)) {
+ next_timer = timeout;
}
- spin_unlock(&vxlan->hash_lock[h]);
}
+ rcu_read_unlock();
mod_timer(&vxlan->age_timer, next_timer);
}
@@ -2903,10 +2875,14 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
+ err = rhashtable_init(&vxlan->fdb_hash_tbl, &vxlan_fdb_rht_params);
+ if (err)
+ return err;
+
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
err = vxlan_vnigroup_init(vxlan);
if (err)
- return err;
+ goto err_rhashtable_destroy;
}
err = gro_cells_init(&vxlan->gro_cells, dev);
@@ -2925,21 +2901,11 @@ err_gro_cells_destroy:
err_vnigroup_uninit:
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_uninit(vxlan);
+err_rhashtable_destroy:
+ rhashtable_destroy(&vxlan->fdb_hash_tbl);
return err;
}
-static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
-{
- struct vxlan_fdb *f;
- u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
-
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
- f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
- if (f)
- vxlan_fdb_destroy(vxlan, f, true, true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
-}
-
static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2951,7 +2917,7 @@ static void vxlan_uninit(struct net_device *dev)
gro_cells_destroy(&vxlan->gro_cells);
- vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
+ rhashtable_destroy(&vxlan->fdb_hash_tbl);
}
/* Start ageing timer and join group when device is brought up */
@@ -2992,7 +2958,8 @@ struct vxlan_fdb_flush_desc {
static bool vxlan_fdb_is_default_entry(const struct vxlan_fdb *f,
const struct vxlan_dev *vxlan)
{
- return is_zero_ether_addr(f->eth_addr) && f->vni == vxlan->cfg.vni;
+ return is_zero_ether_addr(f->key.eth_addr) &&
+ f->key.vni == vxlan->cfg.vni;
}
static bool vxlan_fdb_nhid_matches(const struct vxlan_fdb *f, u32 nhid)
@@ -3015,7 +2982,7 @@ static bool vxlan_fdb_flush_matches(const struct vxlan_fdb *f,
if (desc->ignore_default_entry && vxlan_fdb_is_default_entry(f, vxlan))
return false;
- if (desc->src_vni && f->vni != desc->src_vni)
+ if (desc->src_vni && f->key.vni != desc->src_vni)
return false;
if (desc->nhid && !vxlan_fdb_nhid_matches(f, desc->nhid))
@@ -3071,33 +3038,32 @@ static void vxlan_flush(struct vxlan_dev *vxlan,
const struct vxlan_fdb_flush_desc *desc)
{
bool match_remotes = vxlan_fdb_flush_should_match_remotes(desc);
- unsigned int h;
-
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- struct hlist_node *p, *n;
+ struct vxlan_fdb *f;
- spin_lock_bh(&vxlan->hash_lock[h]);
- hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
- struct vxlan_fdb *f
- = container_of(p, struct vxlan_fdb, hlist);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_list, fdb_node) {
+ if (!vxlan_fdb_flush_matches(f, vxlan, desc))
+ continue;
- if (!vxlan_fdb_flush_matches(f, vxlan, desc))
- continue;
+ spin_lock_bh(&vxlan->hash_lock);
+ if (hlist_unhashed(&f->fdb_node))
+ goto unlock;
- if (match_remotes) {
- bool destroy_fdb = false;
+ if (match_remotes) {
+ bool destroy_fdb = false;
- vxlan_fdb_flush_match_remotes(f, vxlan, desc,
- &destroy_fdb);
+ vxlan_fdb_flush_match_remotes(f, vxlan, desc,
+ &destroy_fdb);
- if (!destroy_fdb)
- continue;
- }
-
- vxlan_fdb_destroy(vxlan, f, true, true);
+ if (!destroy_fdb)
+ goto unlock;
}
- spin_unlock_bh(&vxlan->hash_lock[h]);
+
+ vxlan_fdb_destroy(vxlan, f, true, true);
+unlock:
+ spin_unlock_bh(&vxlan->hash_lock);
}
+ rcu_read_unlock();
}
static const struct nla_policy vxlan_del_bulk_policy[NDA_MAX + 1] = {
@@ -3185,7 +3151,7 @@ static int vxlan_stop(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb_flush_desc desc = {
- /* Default entry is deleted at vxlan_uninit. */
+ /* Default entry is deleted at vxlan_dellink. */
.ignore_default_entry = true,
.state = 0,
.state_mask = NUD_PERMANENT | NUD_NOARP,
@@ -3348,7 +3314,6 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
static void vxlan_setup(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- unsigned int h;
eth_hw_addr_random(dev);
ether_setup(dev);
@@ -3375,15 +3340,13 @@ static void vxlan_setup(struct net_device *dev)
dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
INIT_LIST_HEAD(&vxlan->next);
+ spin_lock_init(&vxlan->hash_lock);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
vxlan->dev = dev;
- for (h = 0; h < FDB_HASH_SIZE; ++h) {
- spin_lock_init(&vxlan->hash_lock[h]);
- INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
- }
+ INIT_HLIST_HEAD(&vxlan->fdb_list);
}
static void vxlan_ether_setup(struct net_device *dev)
@@ -3960,8 +3923,6 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct net_device *remote_dev = NULL;
- struct vxlan_fdb *f = NULL;
- bool unregister = false;
struct vxlan_rdst *dst;
int err;
@@ -3972,72 +3933,54 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
dev->ethtool_ops = &vxlan_ethtool_ops;
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&dst->remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &dst->remote_ip,
- NUD_REACHABLE | NUD_PERMANENT,
- vxlan->cfg.dst_port,
- dst->remote_vni,
- dst->remote_vni,
- dst->remote_ifindex,
- NTF_SELF, 0, &f, extack);
- if (err)
- return err;
- }
-
err = register_netdevice(dev);
if (err)
- goto errout;
- unregister = true;
+ return err;
if (dst->remote_ifindex) {
remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
if (!remote_dev) {
err = -ENODEV;
- goto errout;
+ goto unregister;
}
err = netdev_upper_dev_link(remote_dev, dev, extack);
if (err)
- goto errout;
+ goto unregister;
+
+ dst->remote_dev = remote_dev;
}
err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0)
goto unlink;
- if (f) {
- vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
-
- /* notify default fdb entry */
- err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
- RTM_NEWNEIGH, true, extack);
- if (err) {
- vxlan_fdb_destroy(vxlan, f, false, false);
- if (remote_dev)
- netdev_upper_dev_unlink(remote_dev, dev);
- goto unregister;
- }
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&dst->remote_ip)) {
+ spin_lock_bh(&vxlan->hash_lock);
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
+ &dst->remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_EXCL | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
+ NTF_SELF, 0, true, extack);
+ spin_unlock_bh(&vxlan->hash_lock);
+ if (err)
+ goto unlink;
}
list_add(&vxlan->next, &vn->vxlan_list);
- if (remote_dev)
- dst->remote_dev = remote_dev;
+
return 0;
+
unlink:
if (remote_dev)
netdev_upper_dev_unlink(remote_dev, dev);
-errout:
- /* unregister_netdevice() destroys the default FDB entry with deletion
- * notification. But the addition notification was not sent yet, so
- * destroy the entry by hand here.
- */
- if (f)
- __vxlan_fdb_free(f);
unregister:
- if (unregister)
- unregister_netdevice(dev);
+ unregister_netdevice(dev);
return err;
}
@@ -4454,9 +4397,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
/* handle default dst entry */
if (rem_ip_changed) {
- u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
-
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
&conf.remote_ip,
@@ -4467,7 +4408,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
conf.remote_ifindex,
NTF_SELF, 0, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
netdev_adjacent_change_abort(dst->remote_dev,
lowerdev, dev);
return err;
@@ -4481,7 +4422,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
dst->remote_vni,
dst->remote_ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
/* If vni filtering device, also update fdb entries of
* all vnis that were using default remote ip
@@ -4518,10 +4459,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_fdb_flush_desc desc = {
- /* Default entry is deleted at vxlan_uninit. */
- .ignore_default_entry = true,
- };
+ struct vxlan_fdb_flush_desc desc = {};
vxlan_flush(vxlan, &desc);
@@ -4784,13 +4722,10 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
- u32 hash_index;
-
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
- f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
goto out;
@@ -4804,7 +4739,7 @@ vxlan_fdb_offloaded_set(struct net_device *dev,
rdst->offloaded = fdb_info->offloaded;
out:
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
}
static int
@@ -4813,13 +4748,11 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct netlink_ext_ack *extack;
- u32 hash_index;
int err;
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
extack = switchdev_notifier_info_to_extack(&fdb_info->info);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
NLM_F_CREATE | NLM_F_REPLACE,
@@ -4829,7 +4762,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev,
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
0, false, extack);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -4840,13 +4773,11 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
- u32 hash_index;
int err = 0;
- hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
- f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
err = -ENOENT;
else if (f->flags & NTF_EXT_LEARNED)
@@ -4858,7 +4789,7 @@ vxlan_fdb_external_learn_del(struct net_device *dev,
fdb_info->remote_ifindex,
false);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -4907,18 +4838,15 @@ static void vxlan_fdb_nh_flush(struct nexthop *nh)
{
struct vxlan_fdb *fdb;
struct vxlan_dev *vxlan;
- u32 hash_index;
rcu_read_lock();
list_for_each_entry_rcu(fdb, &nh->fdb_list, nh_list) {
vxlan = rcu_dereference(fdb->vdev);
WARN_ON(!vxlan);
- hash_index = fdb_head_index(vxlan, fdb->eth_addr,
- vxlan->default_dst.remote_vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
- if (!hlist_unhashed(&fdb->hlist))
+ spin_lock_bh(&vxlan->hash_lock);
+ if (!hlist_unhashed(&fdb->fdb_node))
vxlan_fdb_destroy(vxlan, fdb, false, false);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
}
rcu_read_unlock();
}
@@ -4966,19 +4894,15 @@ static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
vxlan_dellink(vxlan->dev, dev_to_kill);
}
-static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
- struct list_head *dev_to_kill)
+static void __net_exit vxlan_exit_rtnl(struct net *net,
+ struct list_head *dev_to_kill)
{
- struct net *net;
-
- ASSERT_RTNL();
- list_for_each_entry(net, net_list, exit_list) {
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ ASSERT_RTNL_NET(net);
- vxlan_destroy_tunnels(vn, dev_to_kill);
- }
+ __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ vxlan_destroy_tunnels(vn, dev_to_kill);
}
static void __net_exit vxlan_exit_net(struct net *net)
@@ -4992,7 +4916,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit_batch_rtnl = vxlan_exit_batch_rtnl,
+ .exit_rtnl = vxlan_exit_rtnl,
.exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
@@ -5002,8 +4926,6 @@ static int __init vxlan_init_module(void)
{
int rc;
- get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
-
rc = register_pernet_subsys(&vxlan_net_ops);
if (rc)
goto out1;
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index 76a351a997d5..d328aed9feef 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -24,18 +24,23 @@ struct vxlan_net {
struct notifier_block nexthop_notifier_block;
};
+struct vxlan_fdb_key {
+ u8 eth_addr[ETH_ALEN];
+ __be32 vni;
+};
+
/* Forwarding table entry */
struct vxlan_fdb {
- struct hlist_node hlist; /* linked list of entries */
+ struct rhash_head rhnode;
struct rcu_head rcu;
unsigned long updated; /* jiffies */
unsigned long used;
struct list_head remotes;
- u8 eth_addr[ETH_ALEN];
+ struct vxlan_fdb_key key;
u16 state; /* see ndm_state */
- __be32 vni;
u16 flags; /* see ndm_flags and below */
struct list_head nh_list;
+ struct hlist_node fdb_node;
struct nexthop __rcu *nh;
struct vxlan_dev __rcu *vdev;
};
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 06d19e90eadb..186d0660669a 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -411,13 +411,12 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb
struct tunnel_msg *tmsg;
struct net_device *dev;
- if (cb->nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct tunnel_msg))) {
+ tmsg = nlmsg_payload(cb->nlh, sizeof(*tmsg));
+ if (!tmsg) {
NL_SET_ERR_MSG(cb->extack, "Invalid msg length");
return -EINVAL;
}
- tmsg = nlmsg_data(cb->nlh);
-
if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) {
NL_SET_ERR_MSG(cb->extack, "Invalid tunnelmsg flags in ancillary header");
return -EINVAL;
@@ -483,11 +482,9 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
struct netlink_ext_ack *extack)
{
struct vxlan_rdst *dst = &vxlan->default_dst;
- u32 hash_index;
int err = 0;
- hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
if (remote_ip && !vxlan_addr_any(remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
remote_ip,
@@ -499,7 +496,7 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
dst->remote_ifindex,
NTF_SELF, 0, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
}
@@ -512,7 +509,7 @@ static int vxlan_update_default_fdb_entry(struct vxlan_dev *vxlan, __be32 vni,
dst->remote_ifindex,
true);
}
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
return err;
}
@@ -628,10 +625,7 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
*/
if (!vxlan_addr_any(&vninode->remote_ip) ||
!vxlan_addr_any(&dst->remote_ip)) {
- u32 hash_index = fdb_head_index(vxlan, all_zeros_mac,
- vninode->vni);
-
- spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ spin_lock_bh(&vxlan->hash_lock);
__vxlan_fdb_delete(vxlan, all_zeros_mac,
(vxlan_addr_any(&vninode->remote_ip) ?
dst->remote_ip : vninode->remote_ip),
@@ -639,7 +633,7 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
vninode->vni, vninode->vni,
dst->remote_ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ spin_unlock_bh(&vxlan->hash_lock);
}
if (vxlan->dev->flags & IFF_UP) {
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 4b8528206cc8..09f7fcd7da78 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -249,6 +249,52 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return 0;
}
+static void remove_node(struct allowedips_node *node, struct mutex *lock)
+{
+ struct allowedips_node *child, **parent_bit, *parent;
+ bool free_parent;
+
+ list_del_init(&node->peer_list);
+ RCU_INIT_POINTER(node->peer, NULL);
+ if (node->bit[0] && node->bit[1])
+ return;
+ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+ lockdep_is_held(lock));
+ if (child)
+ child->parent_bit_packed = node->parent_bit_packed;
+ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+ *parent_bit = child;
+ parent = (void *)parent_bit -
+ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+ free_parent = !rcu_access_pointer(node->bit[0]) && !rcu_access_pointer(node->bit[1]) &&
+ (node->parent_bit_packed & 3) <= 1 && !rcu_access_pointer(parent->peer);
+ if (free_parent)
+ child = rcu_dereference_protected(parent->bit[!(node->parent_bit_packed & 1)],
+ lockdep_is_held(lock));
+ call_rcu(&node->rcu, node_free_rcu);
+ if (!free_parent)
+ return;
+ if (child)
+ child->parent_bit_packed = parent->parent_bit_packed;
+ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+ call_rcu(&parent->rcu, node_free_rcu);
+}
+
+static int remove(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ struct allowedips_node *node;
+
+ if (unlikely(cidr > bits))
+ return -EINVAL;
+ if (!rcu_access_pointer(*trie) || !node_placement(*trie, key, cidr, bits, &node, lock) ||
+ peer != rcu_access_pointer(node->peer))
+ return 0;
+
+ remove_node(node, lock);
+ return 0;
+}
+
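/* Editor's note: remove() is deliberately idempotent -- node_placement()
 * must find an exact (key, cidr) node and that node must still belong
 * to the requesting peer, otherwise it returns 0 without touching the
 * trie; only an out-of-range cidr is reported, as -EINVAL. The
 * selftests added further down lock in exactly these semantics.
 */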
void wg_allowedips_init(struct allowedips *table)
{
table->root4 = table->root6 = NULL;
@@ -300,44 +346,38 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
return add(&table->root6, 128, key, cidr, peer, lock);
}
+int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls */
+ u8 key[4] __aligned(__alignof(u32));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 32);
+ return remove(&table->root4, 32, key, cidr, peer, lock);
+}
+
+int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock)
+{
+ /* Aligned so it can be passed to fls64 */
+ u8 key[16] __aligned(__alignof(u64));
+
+ ++table->seq;
+ swap_endian(key, (const u8 *)ip, 128);
+ return remove(&table->root6, 128, key, cidr, peer, lock);
+}
+
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock)
{
- struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
- bool free_parent;
+ struct allowedips_node *node, *tmp;
if (list_empty(&peer->allowedips_list))
return;
++table->seq;
- list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
- list_del_init(&node->peer_list);
- RCU_INIT_POINTER(node->peer, NULL);
- if (node->bit[0] && node->bit[1])
- continue;
- child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
- lockdep_is_held(lock));
- if (child)
- child->parent_bit_packed = node->parent_bit_packed;
- parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
- *parent_bit = child;
- parent = (void *)parent_bit -
- offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
- free_parent = !rcu_access_pointer(node->bit[0]) &&
- !rcu_access_pointer(node->bit[1]) &&
- (node->parent_bit_packed & 3) <= 1 &&
- !rcu_access_pointer(parent->peer);
- if (free_parent)
- child = rcu_dereference_protected(
- parent->bit[!(node->parent_bit_packed & 1)],
- lockdep_is_held(lock));
- call_rcu(&node->rcu, node_free_rcu);
- if (!free_parent)
- continue;
- if (child)
- child->parent_bit_packed = parent->parent_bit_packed;
- *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
- call_rcu(&parent->rcu, node_free_rcu);
- }
+ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list)
+ remove_node(node, lock);
}
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
index 2346c797eb4d..931958cb6e10 100644
--- a/drivers/net/wireguard/allowedips.h
+++ b/drivers/net/wireguard/allowedips.h
@@ -38,6 +38,10 @@ int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
u8 cidr, struct wg_peer *peer, struct mutex *lock);
int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
+int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip,
+ u8 cidr, struct wg_peer *peer, struct mutex *lock);
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock);
/* The ip input pointer should be __aligned(__alignof(u64)) */
diff --git a/drivers/net/wireguard/cookie.c b/drivers/net/wireguard/cookie.c
index f89581b5e8cb..94d0a7206084 100644
--- a/drivers/net/wireguard/cookie.c
+++ b/drivers/net/wireguard/cookie.c
@@ -26,8 +26,8 @@ void wg_cookie_checker_init(struct cookie_checker *checker,
}
enum { COOKIE_KEY_LABEL_LEN = 8 };
-static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----";
-static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--";
+static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] __nonstring = "mac1----";
+static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] __nonstring = "cookie--";
static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN],
const u8 pubkey[NOISE_PUBLIC_KEY_LEN],
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index f7055180ba4a..67f962eb8b46 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -24,7 +24,7 @@ static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
[WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
[WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
- [WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
+ [WGDEVICE_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, __WGDEVICE_F_ALL),
[WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
[WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
[WGDEVICE_A_PEERS] = { .type = NLA_NESTED }
@@ -33,7 +33,7 @@ static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
[WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN),
- [WGPEER_A_FLAGS] = { .type = NLA_U32 },
+ [WGPEER_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, __WGPEER_F_ALL),
[WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
[WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)),
@@ -46,7 +46,8 @@ static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
[WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
[WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)),
- [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
+ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 },
+ [WGALLOWEDIP_A_FLAGS] = NLA_POLICY_MASK(NLA_U32, __WGALLOWEDIP_F_ALL),
};
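/* Editor's note: NLA_POLICY_MASK() moves the "no unknown flag bits"
 * check into the netlink policy core, which validates at parse time
 * and fills in extack; that is why the open-coded
 * "flags & ~__WGDEVICE_F_ALL / ~__WGPEER_F_ALL" rejections disappear
 * from wg_set_device()/set_peer() below (the core reports -EINVAL
 * where the old checks returned -EOPNOTSUPP).
 */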
static struct wg_device *lookup_interface(struct nlattr **attrs,
@@ -329,6 +330,7 @@ static int set_port(struct wg_device *wg, u16 port)
static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
{
int ret = -EINVAL;
+ u32 flags = 0;
u16 family;
u8 cidr;
@@ -337,19 +339,30 @@ static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
return ret;
family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);
+ if (attrs[WGALLOWEDIP_A_FLAGS])
+ flags = nla_get_u32(attrs[WGALLOWEDIP_A_FLAGS]);
if (family == AF_INET && cidr <= 32 &&
- nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
- ret = wg_allowedips_insert_v4(
- &peer->device->peer_allowedips,
- nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
- &peer->device->device_update_lock);
- else if (family == AF_INET6 && cidr <= 128 &&
- nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
- ret = wg_allowedips_insert_v6(
- &peer->device->peer_allowedips,
- nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
- &peer->device->device_update_lock);
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) {
+ if (flags & WGALLOWEDIP_F_REMOVE_ME)
+ ret = wg_allowedips_remove_v4(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ else
+ ret = wg_allowedips_insert_v4(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ } else if (family == AF_INET6 && cidr <= 128 &&
+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) {
+ if (flags & WGALLOWEDIP_F_REMOVE_ME)
+ ret = wg_allowedips_remove_v6(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ else
+ ret = wg_allowedips_insert_v6(&peer->device->peer_allowedips,
+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr,
+ peer, &peer->device->device_update_lock);
+ }
return ret;
}
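/* Editor's note: with WGALLOWEDIP_F_REMOVE_ME set in
 * WGALLOWEDIP_A_FLAGS, userspace can now delete one allowed IP
 * surgically instead of replacing a peer's whole list; family, address,
 * cidr and owning peer must all match exactly, and a non-match is a
 * silent no-op per the remove() semantics above.
 */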
@@ -373,9 +386,6 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_FLAGS])
flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);
- ret = -EOPNOTSUPP;
- if (flags & ~__WGPEER_F_ALL)
- goto out;
ret = -EPFNOSUPPORT;
if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
@@ -506,9 +516,6 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[WGDEVICE_A_FLAGS])
flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);
- ret = -EOPNOTSUPP;
- if (flags & ~__WGDEVICE_F_ALL)
- goto out;
if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
struct net *net;
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index 202a33af5a72..7eb9a23a3d4d 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -25,8 +25,8 @@
* <- e, ee, se, psk, {}
*/
-static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
-static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com";
+static const u8 handshake_name[37] __nonstring = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
+static const u8 identifier_name[34] __nonstring = "WireGuard v1 zx2c4 Jason@zx2c4.com";
static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
static atomic64_t keypair_counter = ATOMIC64_INIT(0);
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 25de7058701a..41837efa70cb 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -460,6 +460,10 @@ static __init struct wg_peer *init_peer(void)
wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
cidr, mem, &mutex)
+#define remove(version, mem, ipa, ipb, ipc, ipd, cidr) \
+ wg_allowedips_remove_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
+ cidr, mem, &mutex)
+
#define maybe_fail() do { \
++i; \
if (!_s) { \
@@ -585,6 +589,50 @@ bool __init wg_allowedips_selftest(void)
test_negative(4, a, 192, 0, 0, 0);
test_negative(4, a, 255, 0, 0, 0);
+ insert(4, a, 1, 0, 0, 0, 32);
+ insert(4, a, 192, 0, 0, 0, 24);
+ insert(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ insert(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test(4, a, 1, 0, 0, 0);
+ test(4, a, 192, 0, 0, 1);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ test(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+ /* Must be an exact match to remove */
+ remove(4, a, 192, 0, 0, 0, 32);
+ test(4, a, 192, 0, 0, 1);
+ /* NULL peer should have no effect and return 0 */
+ test_boolean(!remove(4, NULL, 192, 0, 0, 0, 24));
+ test(4, a, 192, 0, 0, 1);
+ /* different peer should have no effect and return 0 */
+ test_boolean(!remove(4, b, 192, 0, 0, 0, 24));
+ test(4, a, 192, 0, 0, 1);
+ /* invalid CIDR should have no effect and return -EINVAL */
+ test_boolean(remove(4, b, 192, 0, 0, 0, 33) == -EINVAL);
+ test(4, a, 192, 0, 0, 1);
+ remove(4, a, 192, 0, 0, 0, 24);
+ test_negative(4, a, 192, 0, 0, 1);
+ remove(4, a, 1, 0, 0, 0, 32);
+ test_negative(4, a, 1, 0, 0, 0);
+ /* Must be an exact match to remove */
+ remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 96);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* NULL peer should have no effect and return 0 */
+ test_boolean(!remove(6, NULL, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128));
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* different peer should have no effect and return 0 */
+ test_boolean(!remove(6, b, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128));
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* invalid CIDR should have no effect and return -EINVAL */
+ test_boolean(remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 129) == -EINVAL);
+ test(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ remove(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
+ test_negative(6, a, 0x24446801, 0x40e40800, 0xdeaebeef, 0xdefbeef);
+ /* Must match the peer to remove */
+ remove(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+ remove(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
+ test_negative(6, a, 0x24446800, 0xf0e40800, 0xeeaebeef, 0x10101010);
+
wg_allowedips_free(&t, &mutex);
wg_allowedips_init(&t);
insert(4, a, 192, 168, 0, 0, 16);
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index db9f9ebcb62d..eb8b35b6224d 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -497,7 +497,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
ar_ahb->mem, ar_ahb->mem_len,
ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 9a4f8e815412..48efdc71d54d 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -349,7 +349,7 @@ static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 l
int ret;
size_t buf_len;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -395,7 +395,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -461,7 +461,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
- "bmi fast download address 0x%x buffer 0x%pK length %d\n",
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
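The %pK to %p conversions in these hunks rely on plain %p hashing pointer values by default (since kernel 4.15), so %pK no longer adds protection in debug output. A minimal illustration:

/* Plain %p prints a hashed value, not the raw kernel address: */
ath10k_dbg(ar, ATH10K_DBG_BMI, "buffer %p\n", buffer);
/* %px (not used here) would bypass hashing and leak the raw address. */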
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index afae4a8027f8..a89a7491a76c 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -80,7 +80,7 @@ static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
- struct ath10k_hw_ce_regs_addr_map *addr_map)
+ const struct ath10k_hw_ce_regs_addr_map *addr_map)
{
return ((offset << addr_map->lsb) & addr_map->mask);
}
@@ -203,7 +203,7 @@ static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -217,7 +217,7 @@ static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -231,7 +231,7 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ctrl_regs->addr);
@@ -313,7 +313,7 @@ static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
@@ -325,7 +325,7 @@ static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
@@ -337,7 +337,7 @@ static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
@@ -349,7 +349,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int n)
{
- struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
@@ -360,7 +360,7 @@ static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+ const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -372,7 +372,7 @@ static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+ const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -384,7 +384,7 @@ static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
ar->hw_ce_regs->host_ie_addr);
@@ -396,7 +396,7 @@ static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
u32 ce_ctrl_addr)
{
- struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+ const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
u32 misc_ie_addr = ath10k_ce_read32(ar,
ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
@@ -410,7 +410,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int mask)
{
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
@@ -1230,7 +1230,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
- struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
/*
@@ -1388,7 +1388,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot init ce src ring id %d entries %d base_addr %pK\n",
+ "boot init ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
@@ -1426,7 +1426,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %pK\n",
+ "boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 6d336e39d673..fe3a8f4a1cc1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1200,7 +1200,7 @@ static int ath10k_download_fw(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot uploading firmware image %pK len %d\n",
+ "boot uploading firmware image %p len %d\n",
data, data_len);
/* Check if device supports to download firmware via
@@ -1826,7 +1826,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
if (!ar->running_fw->fw_file.otp_data ||
!ar->running_fw->fw_file.otp_len) {
- ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
ar->running_fw->fw_file.otp_data,
ar->running_fw->fw_file.otp_len);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index a6e21ce90bad..2da08dfebd3e 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -34,7 +34,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
return skb;
}
@@ -54,7 +54,7 @@ void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct ath10k *ar = ep->htc->ar;
struct ath10k_htc_hdr *hdr;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
/* A corner case where the copy completion is reaching to host but still
@@ -515,7 +515,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
/* zero length packet with trailer data, just drop these */
goto out;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 83eab7479f06..52981052e211 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1373,7 +1373,7 @@ static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
}
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 8fafe096adff..84b35a22fc23 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -212,40 +212,40 @@ const struct ath10k_hw_regs wcn3990_regs = {
.pcie_intr_fw_mask = 0x00100000,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(17, 17),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
.msb = 0x00000012,
.lsb = 0x00000012,
.mask = GENMASK(18, 18),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
+static const struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
.addr = 0x00000018,
.src_ring = &wcn3990_src_ring,
.dst_ring = &wcn3990_dst_ring,
.dmax = &wcn3990_dmax,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
.mask = GENMASK(0, 0),
};
-static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
+static const struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
.copy_complete = &wcn3990_host_ie_cc,
};
-static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
+static const struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
@@ -255,7 +255,7 @@ static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
.addr = 0x00000030,
};
-static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
+static const struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.axi_err = 0x00000100,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
@@ -266,19 +266,19 @@ static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
.addr = 0x00000038,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
.msb = 0x00000000,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -286,18 +286,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
.wm_high = &wcn3990_src_wm_high,
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -305,7 +305,7 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.wm_high = &wcn3990_dst_wm_high,
};
-static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
+static const struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
.shift = 19,
.mask = 0x00080000,
.enable = 0x00000000,
@@ -344,25 +344,25 @@ const struct ath10k_hw_values wcn3990_values = {
.ce_desc_meta_data_lsb = 4,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
.msb = 0x00000010,
.lsb = 0x00000010,
.mask = GENMASK(16, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
.msb = 0x00000011,
.lsb = 0x00000011,
.mask = GENMASK(17, 17),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
+static const struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.addr = 0x00000010,
.hw_mask = 0x0007ffff,
.sw_mask = 0x0007ffff,
@@ -375,31 +375,31 @@ static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
.dmax = &qcax_dmax,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
.msb = 0x00000003,
.lsb = 0x00000003,
.mask = GENMASK(3, 3),
};
-static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
+static const struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
.msb = 0x00000000,
.mask = GENMASK(0, 0),
.status_reset = 0x00000000,
.status = &qcax_cmd_halt_status,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
.msb = 0x00000000,
.lsb = 0x00000000,
.mask = GENMASK(0, 0),
};
-static struct ath10k_hw_ce_host_ie qcax_host_ie = {
+static const struct ath10k_hw_ce_host_ie qcax_host_ie = {
.copy_complete_reset = 0x00000000,
.copy_complete = &qcax_host_ie_cc,
};
-static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
+static const struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.dstr_lmask = 0x00000010,
.dstr_hmask = 0x00000008,
.srcr_lmask = 0x00000004,
@@ -409,7 +409,7 @@ static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
.addr = 0x00000030,
};
-static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
+static const struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.axi_err = 0x00000400,
.dstr_add_err = 0x00000200,
.srcr_len_err = 0x00000100,
@@ -420,19 +420,19 @@ static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
.addr = 0x00000038,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
.msb = 0x0000001f,
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.addr = 0x0000004c,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
@@ -440,18 +440,18 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
.wm_high = &qcax_src_wm_high,
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
.lsb = 0x00000010,
.mask = GENMASK(31, 16),
};
-static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
.msb = 0x0000000f,
.lsb = 0x00000000,
.mask = GENMASK(15, 0),
};
-static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
+static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
.addr = 0x00000050,
.low_rst = 0x00000000,
.high_rst = 0x00000000,
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 442091c6dfd2..7ffa1fbe2874 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -289,19 +289,22 @@ struct ath10k_hw_ce_ctrl1 {
u32 sw_wr_mask;
u32 reset_mask;
u32 reset;
- struct ath10k_hw_ce_regs_addr_map *src_ring;
- struct ath10k_hw_ce_regs_addr_map *dst_ring;
- struct ath10k_hw_ce_regs_addr_map *dmax; };
+ const struct ath10k_hw_ce_regs_addr_map *src_ring;
+ const struct ath10k_hw_ce_regs_addr_map *dst_ring;
+ const struct ath10k_hw_ce_regs_addr_map *dmax;
+};
struct ath10k_hw_ce_cmd_halt {
u32 status_reset;
u32 msb;
u32 mask;
- struct ath10k_hw_ce_regs_addr_map *status; };
+ const struct ath10k_hw_ce_regs_addr_map *status;
+};
struct ath10k_hw_ce_host_ie {
u32 copy_complete_reset;
- struct ath10k_hw_ce_regs_addr_map *copy_complete; };
+ const struct ath10k_hw_ce_regs_addr_map *copy_complete;
+};
struct ath10k_hw_ce_host_wm_regs {
u32 dstr_lmask;
@@ -328,8 +331,9 @@ struct ath10k_hw_ce_dst_src_wm_regs {
u32 addr;
u32 low_rst;
u32 high_rst;
- struct ath10k_hw_ce_regs_addr_map *wm_low;
- struct ath10k_hw_ce_regs_addr_map *wm_high; };
+ const struct ath10k_hw_ce_regs_addr_map *wm_low;
+ const struct ath10k_hw_ce_regs_addr_map *wm_high;
+};
struct ath10k_hw_ce_ctrl1_upd {
u32 shift;
@@ -355,14 +359,14 @@ struct ath10k_hw_ce_regs {
u32 ce_rri_low;
u32 ce_rri_high;
u32 host_ie_addr;
- struct ath10k_hw_ce_host_wm_regs *wm_regs;
- struct ath10k_hw_ce_misc_regs *misc_regs;
- struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
- struct ath10k_hw_ce_cmd_halt *cmd_halt;
- struct ath10k_hw_ce_host_ie *host_ie;
- struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
- struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
- struct ath10k_hw_ce_ctrl1_upd *upd;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs;
+ const struct ath10k_hw_ce_misc_regs *misc_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
+ const struct ath10k_hw_ce_cmd_halt *cmd_halt;
+ const struct ath10k_hw_ce_host_ie *host_ie;
+ const struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+ const struct ath10k_hw_ce_ctrl1_upd *upd;
};
struct ath10k_hw_values {
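The const-propagation in hw.c and hw.h follows the usual pattern for read-only register description tables: once the table objects are declared const (and thus placed in .rodata), every pointer on the access path must be const-qualified as well. A minimal sketch with hypothetical names:

struct regmap { u32 lsb; u32 mask; };
static const struct regmap my_map = { .lsb = 16, .mask = GENMASK(31, 16) };

struct regs {
	const struct regmap *map;	/* must be const to accept &my_map */
};
static const struct regs my_regs = { .map = &my_map };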
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index c61b95a928da..8c7ffea0fa44 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -875,7 +875,7 @@ static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
*/
for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
peer->addr, peer, i);
ar->peer_map[i] = NULL;
}
@@ -4063,7 +4063,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %p len %d\n",
skb, skb->len);
skb_queue_tail(&ar->offchan_tx_queue, skb);
@@ -4126,7 +4126,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p len %d\n",
skb, skb->len);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -4181,7 +4181,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
+ ath10k_warn(ar, "timed out waiting for offchannel skb %p, len: %d\n",
skb, skb->len);
if (!peer && tmp_peer_created) {
@@ -7604,7 +7604,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_STA,
- "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+ "mac vdev %d peer delete %pM sta %p (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
if (sta->tdls) {
@@ -7631,7 +7631,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
continue;
if (peer->sta == sta) {
- ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
+ ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
sta->addr, peer, i, arvif->vdev_id);
peer->sta = NULL;
@@ -8811,7 +8811,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx add freq %u width %d ptr %pK\n",
+ "mac chanctx add freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -8835,7 +8835,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx remove freq %u width %d ptr %pK\n",
+ "mac chanctx remove freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -8900,7 +8900,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx change freq %u width %d ptr %pK changed %x\n",
+ "mac chanctx change freq %u width %d ptr %p changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -8959,7 +8959,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx assign ptr %pK vdev_id %i\n",
+ "mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
if (WARN_ON(arvif->is_started)) {
@@ -9039,7 +9039,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx unassign ptr %pK vdev_id %i\n",
+ "mac chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index fb2c60ee433c..20ec0a6d0f71 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3411,7 +3411,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
goto err_region;
}
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
return 0;
err_region:
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 7ce74b4ef201..f3212eab56a1 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1844,7 +1844,7 @@ static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
ret = ath10k_sdio_diag_read32(ar, addr, &val);
if (ret) {
ath10k_warn(ar,
- "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
+ "unable to read hi_acs_flags for htt tx complete: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 7a9b9bbcdbfc..3fcefc55b74f 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -35,7 +35,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
+ "testmode event wmi cmd_id %d skb %p skb->len %d\n",
cmd_id, skb, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@@ -397,7 +397,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index da3bc35e41aa..493bfb410aff 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -35,7 +35,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
- ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 3b51b7f52130..1732a4f98418 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -131,7 +131,7 @@ static void ath10k_usb_recv_complete(struct urb *urb)
int status = 0;
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
- "usb recv pipe %d stat %d len %d urb 0x%pK\n",
+ "usb recv pipe %d stat %d len %d urb 0x%p\n",
pipe->logical_pipe_num, urb->status, urb->actual_length,
urb);
@@ -230,7 +230,7 @@ static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
ath10k_usb_recv_complete, urb_context);
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
- "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n",
+ "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%p\n",
recv_pipe->logical_pipe_num,
recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 5e061f7525a6..df6a24f8f8d5 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2029,7 +2029,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
memcpy(cmd->buf, msdu->data, msdu->len);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2637,7 +2637,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->boottime_ns = ktime_get_boottime_ns();
ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 2f862f8f10ca..fde1ce43c499 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -413,7 +413,7 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab)
return ret;
}
-static void ath11k_ahb_power_down(struct ath11k_base *ab)
+static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -1280,7 +1280,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
struct ath11k_base *ab = platform_get_drvdata(pdev);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab);
+ ath11k_ahb_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index e66e86bdec20..9d8efec46508 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -393,11 +393,10 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
goto err;
}
+ /* Make sure descriptor is read after the head pointer. */
+ dma_rmb();
+
*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
- if (*nbytes == 0) {
- ret = -EIO;
- goto err;
- }
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -430,8 +429,8 @@ static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
- if (unlikely(max_nbytes < nbytes)) {
- ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
+ ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
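The dma_rmb() added above is the standard consumer-side ordering for DMA rings: read the producer index first, issue the barrier, and only then read the descriptor contents, so the CPU cannot observe a stale descriptor for a freshly published index. A sketch of the idiom (hypothetical names):

head = READ_ONCE(*ring->head_ptr);	/* 1: observe producer index */
dma_rmb();				/* 2: order index read before data reads */
nbytes = le16_to_cpu(desc[tail].len);	/* 3: descriptor is now safe to read */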
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 3d39ff85ba94..2e9f8a5e61e4 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -907,12 +907,51 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
};
-static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
-{
- WARN_ON(!ab->hw_params.single_pdev_only);
-
- return &ab->pdevs[0];
-}
+static const struct dmi_system_id ath11k_pm_quirk_table[] = {
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
+ },
+ },
+ {}
+};
void ath11k_fw_stats_pdevs_free(struct list_head *head)
{
@@ -972,23 +1011,33 @@ bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab)
return ab->hw_params.coldboot_cal_mm;
}
-int ath11k_core_suspend(struct ath11k_base *ab)
+/* Check whether we need to continue with the suspend/resume operation.
+ * Return:
+ * a negative value: an error occurred, do not continue.
+ * 0: no error, but do not continue.
+ * a positive value: no error, do continue.
+ */
+static int ath11k_core_continue_suspend_resume(struct ath11k_base *ab)
{
- int ret;
- struct ath11k_pdev *pdev;
struct ath11k *ar;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
/* so far single_pdev_only chips have supports_suspend as true
- * and only the first pdev is valid.
+ * so it is valid to use the first, and the only, pdev here.
*/
- pdev = ath11k_core_get_single_pdev(ab);
- ar = pdev->ar;
+ ar = ab->pdevs[0].ar;
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
+ return 1;
+}
+
+static int ath11k_core_suspend_wow(struct ath11k_base *ab)
+{
+ int ret;
+
ret = ath11k_dp_rx_pktlog_stop(ab, true);
if (ret) {
ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
@@ -996,7 +1045,10 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_mac_wait_tx_complete(ar);
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar);
if (ret) {
ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
@@ -1029,24 +1081,146 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return 0;
}
+
+static int ath11k_core_suspend_default(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, true);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, false);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath11k_ce_stop_shadow_timers(ab);
+ ath11k_dp_stop_shadow_timers(ab);
+
+ /* The PM framework skips the suspend_late/resume_early callbacks
+ * if other devices report errors in their suspend callbacks.
+ * However, ath11k_core_resume() would still be called, because
+ * we return success here and the kernel therefore puts us on the
+ * dpm_suspended_list. Since we won't go through a power down/up
+ * cycle, there is no chance to call complete(&ab->restart_completed)
+ * in ath11k_core_restart(), making ath11k_core_resume() time out.
+ * So call it here to avoid that. This also works in the case where
+ * no error happens and suspend_late/resume_early do get called,
+ * because the completion is reinitialized in ath11k_core_resume_early().
+ */
+ complete(&ab->restart_completed);
+
+ return 0;
+}
+
+int ath11k_core_suspend(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return ath11k_core_suspend_wow(ab);
+
+ return ath11k_core_suspend_default(ab);
+}
EXPORT_SYMBOL(ath11k_core_suspend);
-int ath11k_core_resume(struct ath11k_base *ab)
+int ath11k_core_suspend_late(struct ath11k_base *ab)
{
int ret;
- struct ath11k_pdev *pdev;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return 0;
+
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
+ ath11k_hif_power_down(ab, true);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_suspend_late);
+
+int ath11k_core_resume_early(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return 0;
+
+ reinit_completion(&ab->restart_completed);
+ ret = ath11k_hif_power_up(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_core_resume_early);
+
+static int ath11k_core_resume_default(struct ath11k_base *ab)
+{
struct ath11k *ar;
+ long time_left;
+ int ret;
- if (!ab->hw_params.supports_suspend)
- return -EOPNOTSUPP;
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH11K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
+ }
- /* so far signle_pdev_only chips have supports_suspend as true
- * and only the first pdev is valid.
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
*/
- pdev = ath11k_core_get_single_pdev(ab);
- ar = pdev->ar;
- if (!ar || ar->state != ATH11K_STATE_OFF)
- return 0;
+ ar = ab->pdevs[0].ar;
+ if (ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ ret = ath11k_reg_set_cc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to set country code during resume: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_dp_rx_pktlog_start(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath11k_core_resume_wow(struct ath11k_base *ab)
+{
+ int ret;
ret = ath11k_hif_resume(ab);
if (ret) {
@@ -1072,6 +1246,20 @@ int ath11k_core_resume(struct ath11k_base *ab)
return 0;
}
+
+int ath11k_core_resume(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return ath11k_core_resume_wow(ab);
+
+ return ath11k_core_resume_default(ab);
+}
EXPORT_SYMBOL(ath11k_core_resume);
static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
@@ -2050,6 +2238,7 @@ err_hal_srng_deinit:
void ath11k_core_halt(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
+ struct list_head *pos, *n;
lockdep_assert_held(&ar->conf_mutex);
@@ -2065,7 +2254,12 @@ void ath11k_core_halt(struct ath11k *ar)
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
- INIT_LIST_HEAD(&ar->arvifs);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_safe(pos, n, &ar->arvifs)
+ list_del_init(pos);
+ spin_unlock_bh(&ar->data_lock);
+
idr_init(&ar->txmgmt_idr);
}
@@ -2205,6 +2399,8 @@ static void ath11k_core_restart(struct work_struct *work)
if (!ab->is_reset)
ath11k_core_post_reconfigure_recovery(ab);
+
+ complete(&ab->restart_completed);
}
static void ath11k_core_reset(struct work_struct *work)
@@ -2275,7 +2471,7 @@ static void ath11k_core_reset(struct work_struct *work)
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
@@ -2325,10 +2521,62 @@ int ath11k_core_pre_init(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_core_pre_init);
+static int ath11k_core_pm_notify(struct notifier_block *nb,
+ unsigned long action, void *nouse)
+{
+ struct ath11k_base *ab = container_of(nb, struct ath11k_base,
+ pm_nb);
+
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ ab->actual_pm_policy = ab->pm_policy;
+ break;
+ case PM_HIBERNATION_PREPARE:
+ ab->actual_pm_policy = ATH11K_PM_DEFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int ath11k_core_pm_notifier_register(struct ath11k_base *ab)
+{
+ ab->pm_nb.notifier_call = ath11k_core_pm_notify;
+ return register_pm_notifier(&ab->pm_nb);
+}
+
+void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = unregister_pm_notifier(&ab->pm_nb);
+ if (ret)
+ /* just warn here, there is nothing that can be done in the failure case */
+ ath11k_warn(ab, "failed to unregister PM notifier %d\n", ret);
+}
+EXPORT_SYMBOL(ath11k_core_pm_notifier_unregister);
+
int ath11k_core_init(struct ath11k_base *ab)
{
+ const struct dmi_system_id *dmi_id;
int ret;
+ dmi_id = dmi_first_match(ath11k_pm_quirk_table);
+ if (dmi_id)
+ ab->pm_policy = (kernel_ulong_t)dmi_id->driver_data;
+ else
+ ab->pm_policy = ATH11K_PM_DEFAULT;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pm policy %u\n", ab->pm_policy);
+
+ ret = ath11k_core_pm_notifier_register(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to register PM notifier: %d\n", ret);
+ return ret;
+ }
+
ret = ath11k_core_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create soc core: %d\n", ret);
@@ -2348,9 +2596,10 @@ void ath11k_core_deinit(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
+ ath11k_core_pm_notifier_unregister(ab);
}
EXPORT_SYMBOL(ath11k_core_deinit);
@@ -2401,6 +2650,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
+ init_completion(&ab->restart_completed);
ab->dev = dev;
ab->hif.bus = bus;
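Taken together, the new hooks give the default (non-WoW) policy a full power-down/power-up cycle across system sleep. A rough sketch of the resulting call order, assuming no other device fails its suspend callback:

/*
 * suspend:      ath11k_core_suspend()      - stop pktlog, drain tx
 * suspend_late: ath11k_core_suspend_late() - disable IRQs, power down (MHI dev kept)
 * resume_early: ath11k_core_resume_early() - power up, firmware restarts
 * resume:       ath11k_core_resume()       - wait for restart_completed,
 *                                            restore country code and pktlog
 */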
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 1a3d0de4afde..339d4fca1ed5 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -16,6 +16,7 @@
#include <linux/rhashtable.h>
#include <linux/average.h>
#include <linux/firmware.h>
+#include <linux/suspend.h>
#include "qmi.h"
#include "htc.h"
@@ -892,6 +893,11 @@ struct ath11k_msi_config {
u16 hw_rev;
};
+enum ath11k_pm_policy {
+ ATH11K_PM_DEFAULT,
+ ATH11K_PM_WOW,
+};
+
/* Master structure to hold the hw data which may be used in core module */
struct ath11k_base {
enum ath11k_hw_rev hw_rev;
@@ -1050,6 +1056,8 @@ struct ath11k_base {
DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT);
} fw;
+ struct completion restart_completed;
+
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 data_pos;
@@ -1058,6 +1066,10 @@ struct ath11k_base {
} testmode;
#endif
+ enum ath11k_pm_policy pm_policy;
+ enum ath11k_pm_policy actual_pm_policy;
+ struct notifier_block pm_nb;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1249,8 +1261,10 @@ void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
+int ath11k_core_resume_early(struct ath11k_base *ab);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
+int ath11k_core_suspend_late(struct ath11k_base *ab);
void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
@@ -1322,4 +1336,6 @@ static inline const char *ath11k_bus_str(enum ath11k_bus bus)
return "unknown";
}
+void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab);
+
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 218ab41c0f3c..ea2959305dec 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2637,7 +2637,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
struct ath11k *ar;
struct hal_reo_dest_ring *desc;
enum hal_reo_dest_ring_push_reason push_reason;
- u32 cookie;
+ u32 cookie, info0, rx_msdu_info0, rx_mpdu_info0;
int i;
for (i = 0; i < MAX_RADIOS; i++)
@@ -2650,11 +2650,14 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
try_again:
ath11k_hal_srng_access_begin(ab, srng);
+ /* Make sure descriptor is read after the head pointer. */
+ dma_rmb();
+
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
- desc->buf_addr_info.info1);
+ READ_ONCE(desc->buf_addr_info.info1));
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@@ -2683,8 +2686,9 @@ try_again:
num_buffs_reaped[mac_id]++;
+ info0 = READ_ONCE(desc->info0);
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
- desc->info0);
+ info0);
if (unlikely(push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
@@ -2692,18 +2696,21 @@ try_again:
continue;
}
- rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ rx_msdu_info0 = READ_ONCE(desc->rx_msdu_info.info0);
+ rx_mpdu_info0 = READ_ONCE(desc->rx_mpdu_info.info0);
+
+ rxcb->is_first_msdu = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_last_msdu = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ rxcb->is_continuation = !!(rx_msdu_info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
- desc->rx_mpdu_info.meta_data);
+ READ_ONCE(desc->rx_mpdu_info.meta_data));
rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
- desc->rx_mpdu_info.info0);
+ rx_mpdu_info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
- desc->info0);
+ info0);
rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list[mac_id], msdu);
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 61f4b6dd5380..8cb1505a5a0c 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -599,7 +599,7 @@ u32 ath11k_hal_ce_dst_status_get_length(void *buf)
struct hal_ce_srng_dst_status_desc *desc = buf;
u32 len;
- len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+ len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, READ_ONCE(desc->flags));
desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
return len;
@@ -829,7 +829,7 @@ void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
} else {
- srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)
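READ_ONCE() on the head pointer guards against the compiler tearing the load or re-reading a location the device updates concurrently, pairing with the dma_rmb() on the consumer side. A minimal sketch (process_next_entry() is hypothetical):

u32 hp = READ_ONCE(*srng->u.dst_ring.hp_addr);	/* single, stable snapshot */
dma_rmb();					/* order before descriptor reads */
while (tp != hp)
	tp = process_next_entry(srng, tp);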
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 770c39ff99b4..cd9c4b838246 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HIF_H_
@@ -18,7 +18,7 @@ struct ath11k_hif_ops {
int (*start)(struct ath11k_base *ab);
void (*stop)(struct ath11k_base *ab);
int (*power_up)(struct ath11k_base *ab);
- void (*power_down)(struct ath11k_base *ab);
+ void (*power_down)(struct ath11k_base *ab, bool is_suspend);
int (*suspend)(struct ath11k_base *ab);
int (*resume)(struct ath11k_base *ab);
int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id,
@@ -68,12 +68,18 @@ static inline void ath11k_hif_irq_disable(struct ath11k_base *ab)
static inline int ath11k_hif_power_up(struct ath11k_base *ab)
{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
return ab->hif.ops->power_up(ab);
}
-static inline void ath11k_hif_power_down(struct ath11k_base *ab)
+static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend)
{
- ab->hif.ops->power_down(ab);
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
}
static inline int ath11k_hif_suspend(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 97816916abac..08d7b136851f 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1531,8 +1531,14 @@ static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif)
{
- if (arvif->vif->mbssid_tx_vif)
- return ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+ struct ieee80211_bss_conf *link_conf, *tx_bss_conf;
+
+ lockdep_assert_wiphy(arvif->ar->hw->wiphy);
+
+ link_conf = &arvif->vif->bss_conf;
+ tx_bss_conf = wiphy_dereference(arvif->ar->hw->wiphy, link_conf->tx_bss_conf);
+ if (tx_bss_conf)
+ return ath11k_vif_to_arvif(tx_bss_conf->vif);
return NULL;
}
@@ -9966,12 +9972,17 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
struct ath11k_base *ab = ar->ab;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
- int n_limits;
+ int n_limits, n_combos;
bool p2p;
p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
- combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (ab->hw_params.support_dual_stations)
+ n_combos = 2;
+ else
+ n_combos = 1;
+
+ combinations = kcalloc(n_combos, sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
@@ -9986,7 +9997,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
+ limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+ limits[1].max = 16;
limits[1].types |= BIT(NL80211_IFTYPE_AP);
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
@@ -9996,25 +10009,24 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
combinations[0].n_limits = n_limits;
combinations[0].beacon_int_infra_match = true;
combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
if (ab->hw_params.support_dual_stations) {
limits[0].max = 2;
- limits[1].max = 1;
-
- combinations[0].max_interfaces = ab->hw_params.num_vdevs;
- combinations[0].num_different_channels = 2;
- } else {
- limits[0].max = 1;
- limits[1].max = 16;
- combinations[0].max_interfaces = 16;
- combinations[0].num_different_channels = 1;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_80P80) |
- BIT(NL80211_CHAN_WIDTH_160);
+ combinations[1].limits = limits;
+ combinations[1].n_limits = n_limits;
+ combinations[1].beacon_int_infra_match = true;
+ combinations[1].beacon_int_min_gcd = 100;
+ combinations[1].max_interfaces = ab->hw_params.num_vdevs;
+ combinations[1].num_different_channels = 2;
}
if (p2p) {
@@ -10025,7 +10037,7 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
}
ar->hw->wiphy->iface_combinations = combinations;
- ar->hw->wiphy->n_iface_combinations = 1;
+ ar->hw->wiphy->n_iface_combinations = n_combos;
return 0;
}
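With support_dual_stations the driver now registers two alternative combinations (sharing one limits array) instead of one overloaded entry; cfg80211 accepts an interface set if it satisfies any registered combination. In outline:

/* combinations[0]: one channel, up to 16 interfaces, DFS widths advertised */
/* combinations[1]: two channels, up to num_vdevs interfaces, no radar detect
 * (registered only on dual-station hardware)
 */
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = n_combos;	/* 2 when dual-station capable */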
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index fc77eac83e95..acd76e9392d3 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -454,9 +454,17 @@ int ath11k_mhi_start(struct ath11k_pci *ab_pci)
return 0;
}
-void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend)
{
- mhi_power_down(ab_pci->mhi_ctrl, true);
+ /* During suspend we need to use mhi_power_down_keep_dev()
+ * workaround, otherwise ath11k_core_resume() will timeout
+ * during resume.
+ */
+ if (is_suspend)
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ else
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
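mhi_power_down_keep_dev() powers the device down but keeps its MHI device structure registered, so the resume path can bring the link back without a full re-enumeration; the behavior is selected by the new is_suspend flag:

ath11k_mhi_stop(ab_pci, true);	/* suspend: mhi_power_down_keep_dev() */
ath11k_mhi_stop(ab_pci, false);	/* shutdown/recovery: mhi_power_down() */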
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index 651470091bd5..5c5c2b03c81f 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_MHI_H
#define _ATH11K_MHI_H
@@ -18,7 +18,7 @@
#define MHICTRL_RESET_MASK 0x2
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
-void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 412f4a134e4a..78444f8ea153 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -821,7 +821,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return 0;
}
-static void ath11k_pci_power_down(struct ath11k_base *ab)
+static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -832,7 +832,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_msi_disable(ab_pci);
- ath11k_mhi_stop(ab_pci);
+ ath11k_mhi_stop(ab_pci, is_suspend);
clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
@@ -929,7 +929,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
- u32 soc_hw_version_major, soc_hw_version_minor, addr;
+ u32 soc_hw_version_major, soc_hw_version_minor;
int ret;
u32 sub_version;
@@ -955,8 +955,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
* from DT. If memory is reserved from DT for FW, ath11k driver need not
* allocate memory.
*/
- ret = of_property_read_u32(ab->dev->of_node, "memory-region", &addr);
- if (!ret)
+ if (of_property_present(ab->dev->of_node, "memory-region"))
set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
ret = ath11k_pci_claim(ab_pci, pdev);
@@ -1161,9 +1160,10 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
+ ath11k_core_pm_notifier_unregister(ab);
goto qmi_fail;
}
@@ -1192,7 +1192,7 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev)
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
}
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
@@ -1229,9 +1229,39 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
- ath11k_pci_pm_suspend,
- ath11k_pci_pm_resume);
+static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_suspend_late(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ /* Similar to ath11k_pci_pm_suspend(), we return success here even
+ * if an error happens, to allow system suspend/hibernation to survive.
+ */
+ return 0;
+}
+
+static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_resume_early(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend,
+ ath11k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late,
+ ath11k_pci_pm_resume_early)
+};
static struct pci_driver ath11k_pci_driver = {
.name = "ath11k_pci",
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 4f8b08ed1bbc..2782f4723e41 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -1993,6 +1993,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->prev_size == chunk->size)
continue;
+ if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "size/type mismatch (current %d %u) (prev %d %u), try later with small size\n",
+ chunk->size, chunk->type,
+ chunk->prev_size, chunk->prev_type);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
+
/* cannot reuse the existing chunk */
dma_free_coherent(ab->dev, chunk->prev_size,
chunk->vaddr, chunk->paddr);
@@ -2887,7 +2896,7 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
}
/* reset the firmware */
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 9be1cd742339..a9751ea2a0b7 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -107,7 +107,7 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
u32 pdev_id;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
- "event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+ "event wmi cmd_id %d ftm event msg %p datalen %d\n",
cmd_id, ftm_msg, length);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index 52a1bb19e3da..b3b15e1eb282 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -15,6 +15,14 @@ config ATH12K
If you choose to build a module, it'll be called ath12k.
+config ATH12K_AHB
+ bool "QTI ath12k AHB support"
+ depends on ATH12K && REMOTEPROC
+ select QCOM_MDT_LOADER
+ select QCOM_SCM
+ help
+ Enable support for ath12k AHB bus chipsets, for example IPQ5332.
+
config ATH12K_DEBUG
bool "ath12k debugging"
depends on ATH12K
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 60644cb42c76..d95ee525a6cd 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,6 +23,7 @@ ath12k-y += core.o \
fw.o \
p2p.o
+ath12k-$(CONFIG_ATH12K_AHB) += ahb.o
ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c
new file mode 100644
index 000000000000..8d1a86e420a4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ahb.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem_state.h>
+#include "ahb.h"
+#include "debug.h"
+#include "hif.h"
+
+static const struct of_device_id ath12k_ahb_of_match[] = {
+ { .compatible = "qcom,ipq5332-wifi",
+ .data = (void *)ATH12K_HW_IPQ5332_HW10,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match);
+
+#define ATH12K_IRQ_CE0_OFFSET 4
+#define ATH12K_MAX_UPDS 1
+#define ATH12K_UPD_IRQ_WRD_LEN 18
+static const char ath12k_userpd_irq[][9] = {"spawn",
+ "ready",
+ "stop-ack"};
+
+static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring4",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring4,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
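+
+/* Register accessors: on parts with ce_remap set (e.g. IPQ5332), CE
+ * register offsets (below HAL_SEQ_WCSS_CMEM_OFFSET) are serviced from
+ * the separately ioremapped ab->mem_ce window rather than ab->mem.
+ */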
+static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
+{
+ if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ return ioread32(ab->mem_ce + offset);
+ return ioread32(ab->mem + offset);
+}
+
+static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
+ u32 value)
+{
+ if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ iowrite32(value, ab->mem_ce + offset);
+ else
+ iowrite32(value, ab->mem + offset);
+}
+
+static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ cancel_work_sync(&ce_pipe->intr_wq);
+ }
+}
+
+static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath12k_ahb_ext_grp_disable(irq_grp);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath12k_ahb_read32(ab, offset);
+ ath12k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath12k_ahb_read32(ab, offset);
+ ath12k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
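+
+/* Per-CE interrupt enables: IE1 gates the source (host->target) ring,
+ * IE2 the destination ring, and a second set of destination-side bits
+ * shares the IE1 register at CE_HOST_IE_3_SHIFT (the IE3 address).
+ */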
+static void ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr;
+
+ ce_attr = &ab->hw_params->host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath12k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath12k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
+ ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr;
+
+ ce_attr = &ab->hw_params->host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath12k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath12k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
+ ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+static int ath12k_ahb_start(struct ath12k_base *ab)
+{
+ ath12k_ahb_ce_irqs_enable(ab);
+ ath12k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab)
+{
+ struct ath12k_ext_irq_grp *irq_grp;
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath12k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
+{
+ __ath12k_ahb_ext_irq_disable(ab);
+ ath12k_ahb_sync_ext_irqs(ab);
+}
+
+static void ath12k_ahb_stop(struct ath12k_base *ab)
+{
+ if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath12k_ahb_ce_irqs_disable(ab);
+ ath12k_ahb_sync_ce_irqs(ab);
+ ath12k_ahb_cancel_workqueue(ab);
+ timer_delete_sync(&ab->rx_replenish_retry);
+ ath12k_ce_cleanup_pipes(ab);
+}
+
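+
+/* UserPD bring-up: map the DT-reserved firmware region, stage the q6_fw
+ * MDT image and the iu_fw companion image into it, authenticate them via
+ * SCM, then raise the SMEM spawn bit and wait for the spawn and ready
+ * completions signalled from the userPD IRQ handler.
+ */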
+static int ath12k_ahb_power_up(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ char fw_name[ATH12K_USERPD_FW_NAME_LEN];
+ char fw2_name[ATH12K_USERPD_FW_NAME_LEN];
+ struct device *dev = ab->dev;
+ const struct firmware *fw, *fw2;
+ struct reserved_mem *rmem = NULL;
+ unsigned long time_left;
+ phys_addr_t mem_phys;
+ void *mem_region;
+ size_t mem_size;
+ u32 pasid;
+ int ret;
+
+ rmem = ath12k_core_get_reserved_mem(ab, 0);
+ if (!rmem)
+ return -ENODEV;
+
+ mem_phys = rmem->base;
+ mem_size = rmem->size;
+ mem_region = devm_memremap(dev, mem_phys, mem_size, MEMREMAP_WC);
+ if (IS_ERR(mem_region)) {
+ ath12k_err(ab, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ return PTR_ERR(mem_region);
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "%s/%s/%s%d%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, ATH12K_AHB_FW_PREFIX, ab_ahb->userpd_id,
+ ATH12K_AHB_FW_SUFFIX);
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret < 0) {
+ ath12k_err(ab, "request_firmware failed\n");
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw_name,
+ fw->size);
+
+ if (!fw->size) {
+ ath12k_err(ab, "Invalid firmware size\n");
+ ret = -EINVAL;
+ goto err_fw;
+ }
+
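+ /* The peripheral-auth ID packs the userPD number into the bits of
+ * ATH12K_USERPD_ID_MASK above the fixed software ID; e.g. userpd_id 1
+ * encodes to pasid 0x112.
+ */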
+ pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
+ ATH12K_AHB_UPD_SWID;
+
+ /* Load FW image to a reserved memory location */
+ ret = qcom_mdt_load(dev, fw, fw_name, pasid, mem_region, mem_phys, mem_size,
+ &mem_phys);
+ if (ret) {
+ ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+ goto err_fw;
+ }
+
+ snprintf(fw2_name, sizeof(fw2_name), "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, ATH12K_AHB_FW2);
+
+ ret = request_firmware(&fw2, fw2_name, dev);
+ if (ret < 0) {
+ ath12k_err(ab, "request_firmware failed\n");
+ goto err_fw;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw2_name,
+ fw2->size);
+
+ if (!fw2->size) {
+ ath12k_err(ab, "Invalid firmware size\n");
+ ret = -EINVAL;
+ goto err_fw2;
+ }
+
+ ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, pasid, mem_region, mem_phys,
+ mem_size, &mem_phys);
+ if (ret) {
+ ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+ goto err_fw2;
+ }
+
+ /* Authenticate FW image using peripheral ID */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+ if (ret) {
+ ath12k_err(ab, "failed to boot the remote processor %d\n", ret);
+ goto err_fw2;
+ }
+
+ /* Instruct Q6 to spawn userPD thread */
+ ret = qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit),
+ BIT(ab_ahb->spawn_bit));
+ if (ret) {
+ ath12k_err(ab, "Failed to update spawn state %d\n", ret);
+ goto err_fw2;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_spawned,
+ ATH12K_USERPD_SPAWN_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD spawn wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto err_fw2;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_ready,
+ ATH12K_USERPD_READY_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD ready wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto err_fw2;
+ }
+
+ qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit), 0);
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "UserPD%d is now UP\n", ab_ahb->userpd_id);
+
+err_fw2:
+ release_firmware(fw2);
+err_fw:
+ release_firmware(fw);
+ return ret;
+}
+
+static void ath12k_ahb_power_down(struct ath12k_base *ab, bool is_suspend)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ unsigned long time_left;
+ u32 pasid;
+ int ret;
+
+ qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit),
+ BIT(ab_ahb->stop_bit));
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_stopped,
+ ATH12K_USERPD_STOP_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD stop wait timed out\n");
+ return;
+ }
+
+ qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit), 0);
+
+ pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
+ ATH12K_AHB_UPD_SWID;
+ /* Release the firmware */
+ ret = qcom_scm_pas_shutdown(pasid);
+ if (ret)
+ ath12k_err(ab, "scm pas shutdown failed for userPD%d: %d\n",
+ ab_ahb->userpd_id, ret);
+}
+
+static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab)
+{
+ struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ab->hw_params->target_ce_count;
+ cfg->tgt_ce = ab->hw_params->target_ce_config;
+ cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
+ cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
+ ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+}
+
+static void ath12k_ahb_ce_workqueue(struct work_struct *work)
+{
+ struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
+
+ ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ce_pipe *ce_pipe = arg;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ queue_work(system_bh_wq, &ce_pipe->intr_wq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath12k_ext_irq_grp,
+ napi);
+ struct ath12k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath12k_ahb_ext_grp_enable(irq_grp);
+ }
+
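+ /* ath12k_dp_service_srng() may report more completed work than the
+ * budget; clamp it so the NAPI core accounting stays consistent.
+ */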
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ext_irq_grp *irq_grp = arg;
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ ath12k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
+{
+ const struct ath12k_hw_ring_mask *ring_mask;
+ struct ath12k_ext_irq_grp *irq_grp;
+ const struct hal_ops *hal_ops;
+ int i, j, irq, irq_idx, ret;
+ u32 num_irq;
+
+ ring_mask = ab->hw_params->ring_mask;
+ hal_ops = ab->hw_params->hal_ops;
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev)
+ return -ENOMEM;
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
+ ath12k_ahb_ext_grp_napi_poll);
+
+ for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) {
+ /* For TX ring, ensure that the ring mask and the
+ * tcl_to_wbm_rbm_map point to the same ring number.
+ */
+ if (ring_mask->tx[i] &
+ BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ring_mask->rx[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ring_mask->rx_err[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ring_mask->rx_wbm_rel[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ring_mask->reo_status[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (ring_mask->rx_mon_dest[i] & BIT(j))
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_destination_mac1;
+ }
+
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(ab->dev, irq,
+ ath12k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret)
+ ath12k_warn(ab, "failed request_irq for %d\n", irq);
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_config_irq(struct ath12k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
+
+ INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ ret = devm_request_irq(ab->dev, irq, ath12k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ ret = ath12k_ahb_config_ext_irq(ab);
+
+ return ret;
+}
+
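+/* Resolve the copy-engine pipes for an HTC service: PIPEDIR_OUT entries
+ * give the upload (host->target) pipe and PIPEDIR_IN entries the
+ * download (target->host) pipe; PIPEDIR_INOUT sets both.
+ */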
+static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ u32 pipedir;
+ int i;
+
+ for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params->svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ pipedir = __le32_to_cpu(entry->pipedir);
+ if (pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT) {
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ }
+
+ if (pipedir == PIPEDIR_OUT || pipedir == PIPEDIR_INOUT) {
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = {
+ .start = ath12k_ahb_start,
+ .stop = ath12k_ahb_stop,
+ .read32 = ath12k_ahb_read32,
+ .write32 = ath12k_ahb_write32,
+ .irq_enable = ath12k_ahb_ext_irq_enable,
+ .irq_disable = ath12k_ahb_ext_irq_disable,
+ .map_service_to_pipe = ath12k_ahb_map_service_to_pipe,
+ .power_up = ath12k_ahb_power_up,
+ .power_down = ath12k_ahb_power_down,
+};
+
+static irqreturn_t ath12k_userpd_irq_handler(int irq, void *data)
+{
+ struct ath12k_base *ab = data;
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_SPAWN_IRQ]) {
+ complete(&ab_ahb->userpd_spawned);
+ } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_READY_IRQ]) {
+ complete(&ab_ahb->userpd_ready);
+ } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_STOP_ACK_IRQ]) {
+ complete(&ab_ahb->userpd_stopped);
+ } else {
+ ath12k_err(ab, "Invalid userpd interrupt\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_config_rproc_irq(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ int i, ret;
+ char *upd_irq_name;
+
+ for (i = 0; i < ATH12K_USERPD_MAX_IRQ; i++) {
+ ab_ahb->userpd_irq_num[i] = platform_get_irq_byname(ab->pdev,
+ ath12k_userpd_irq[i]);
+ if (ab_ahb->userpd_irq_num[i] < 0)
+ return ab_ahb->userpd_irq_num[i];
+
+ upd_irq_name = devm_kzalloc(&ab->pdev->dev, ATH12K_UPD_IRQ_WRD_LEN,
+ GFP_KERNEL);
+ if (!upd_irq_name)
+ return -ENOMEM;
+
+ scnprintf(upd_irq_name, ATH12K_UPD_IRQ_WRD_LEN, "UserPD%u-%s",
+ ab_ahb->userpd_id, ath12k_userpd_irq[i]);
+ ret = devm_request_threaded_irq(&ab->pdev->dev, ab_ahb->userpd_irq_num[i],
+ NULL, ath12k_userpd_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ upd_irq_name, ab);
+ if (ret)
+ return dev_err_probe(&ab->pdev->dev, ret,
+ "Request %s irq failed: %d\n",
+ ath12k_userpd_irq[i], ret);
+ }
+
+ ab_ahb->spawn_state = devm_qcom_smem_state_get(&ab->pdev->dev, "spawn",
+ &ab_ahb->spawn_bit);
+ if (IS_ERR(ab_ahb->spawn_state))
+ return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->spawn_state),
+ "Failed to acquire spawn state\n");
+
+ ab_ahb->stop_state = devm_qcom_smem_state_get(&ab->pdev->dev, "stop",
+ &ab_ahb->stop_bit);
+ if (IS_ERR(ab_ahb->stop_state))
+ return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->stop_state),
+ "Failed to acquire stop state\n");
+
+ init_completion(&ab_ahb->userpd_spawned);
+ init_completion(&ab_ahb->userpd_ready);
+ init_completion(&ab_ahb->userpd_stopped);
+ return 0;
+}
+
+static int ath12k_ahb_root_pd_state_notifier(struct notifier_block *nb,
+ const unsigned long event, void *data)
+{
+ struct ath12k_ahb *ab_ahb = container_of(nb, struct ath12k_ahb, root_pd_nb);
+ struct ath12k_base *ab = ab_ahb->ab;
+
+ if (event == ATH12K_RPROC_AFTER_POWERUP) {
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Root PD is UP\n");
+ complete(&ab_ahb->rootpd_ready);
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_register_rproc_notifier(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ ab_ahb->root_pd_nb.notifier_call = ath12k_ahb_root_pd_state_notifier;
+ init_completion(&ab_ahb->rootpd_ready);
+
+ ab_ahb->root_pd_notifier = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name,
+ &ab_ahb->root_pd_nb);
+ if (IS_ERR(ab_ahb->root_pd_notifier))
+ return PTR_ERR(ab_ahb->root_pd_notifier);
+
+ return 0;
+}
+
+static void ath12k_ahb_unregister_rproc_notifier(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (!ab_ahb->root_pd_notifier) {
+ ath12k_err(ab, "Rproc notifier not registered\n");
+ return;
+ }
+
+ qcom_unregister_ssr_notifier(ab_ahb->root_pd_notifier,
+ &ab_ahb->root_pd_nb);
+ ab_ahb->root_pd_notifier = NULL;
+}
+
+static int ath12k_ahb_get_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ struct device *dev = ab->dev;
+ struct device_node *np;
+ struct rproc *prproc;
+
+ np = of_parse_phandle(dev->of_node, "qcom,rproc", 0);
+ if (!np) {
+ ath12k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(np->phandle);
+ of_node_put(np);
+ if (!prproc)
+ return dev_err_probe(&ab->pdev->dev, -EPROBE_DEFER,
+ "failed to get rproc\n");
+
+ ab_ahb->tgt_rproc = prproc;
+
+ return 0;
+}
+
+static int ath12k_ahb_boot_root_pd(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ unsigned long time_left;
+ int ret;
+
+ ret = rproc_boot(ab_ahb->tgt_rproc);
+ if (ret < 0) {
+ ath12k_err(ab, "RootPD boot failed\n");
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->rootpd_ready,
+ ATH12K_ROOTPD_READY_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "RootPD ready wait timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_configure_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ int ret;
+
+ ret = ath12k_ahb_get_rproc(ab);
+ if (ret < 0)
+ return ret;
+
+ ret = ath12k_ahb_register_rproc_notifier(ab);
+ if (ret < 0) {
+ ret = dev_err_probe(&ab->pdev->dev, ret,
+ "failed to register rproc notifier\n");
+ goto err_put_rproc;
+ }
+
+ if (ab_ahb->tgt_rproc->state != RPROC_RUNNING) {
+ ret = ath12k_ahb_boot_root_pd(ab);
+ if (ret < 0) {
+ ath12k_err(ab, "failed to boot the remote processor Q6\n");
+ goto err_unreg_notifier;
+ }
+ }
+
+ return ath12k_ahb_config_rproc_irq(ab);
+
+err_unreg_notifier:
+ ath12k_ahb_unregister_rproc_notifier(ab);
+
+err_put_rproc:
+ rproc_put(ab_ahb->tgt_rproc);
+ return ret;
+}
+
+static void ath12k_ahb_deconfigure_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ ath12k_ahb_unregister_rproc_notifier(ab);
+ rproc_put(ab_ahb->tgt_rproc);
+}
+
+static int ath12k_ahb_resource_init(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ struct platform_device *pdev = ab->pdev;
+ struct resource *mem_res;
+ int ret;
+
+ ab->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
+ if (IS_ERR(ab->mem)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(ab->mem), "ioremap error\n");
+ goto out;
+ }
+
+ ab->mem_len = resource_size(mem_res);
+
+ if (ab->hw_params->ce_remap) {
+ const struct ce_remap *ce_remap = ab->hw_params->ce_remap;
+ /* The CE register space is moved out of WCSS and is not
+ * contiguous with it, hence the CE registers are remapped
+ * into a separate window for access.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ ret = -ENOMEM;
+ goto err_mem_unmap;
+ }
+ ab->ce_remap = true;
+ ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE;
+ }
+
+ ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
+ if (IS_ERR(ab_ahb->xo_clk)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(ab_ahb->xo_clk),
+ "failed to get xo clock\n");
+ goto err_mem_ce_unmap;
+ }
+
+ ret = clk_prepare_enable(ab_ahb->xo_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable gcc_xo_clk: %d\n", ret);
+ goto err_clock_deinit;
+ }
+
+ return 0;
+
+err_clock_deinit:
+ devm_clk_put(ab->dev, ab_ahb->xo_clk);
+
+err_mem_ce_unmap:
+ ab_ahb->xo_clk = NULL;
+ if (ab->hw_params->ce_remap)
+ iounmap(ab->mem_ce);
+
+err_mem_unmap:
+ ab->mem_ce = NULL;
+ devm_iounmap(ab->dev, ab->mem);
+
+out:
+ ab->mem = NULL;
+ return ret;
+}
+
+static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (ab->mem)
+ devm_iounmap(ab->dev, ab->mem);
+
+ if (ab->mem_ce)
+ iounmap(ab->mem_ce);
+
+ ab->mem = NULL;
+ ab->mem_ce = NULL;
+
+ clk_disable_unprepare(ab_ahb->xo_clk);
+ devm_clk_put(ab->dev, ab_ahb->xo_clk);
+ ab_ahb->xo_clk = NULL;
+}
+
+static int ath12k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath12k_base *ab;
+ const struct ath12k_hif_ops *hif_ops;
+ struct ath12k_ahb *ab_ahb;
+ enum ath12k_hw_rev hw_rev;
+ u32 addr, userpd_id;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set 32-bit coherent dma\n");
+ return ret;
+ }
+
+ ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
+ ATH12K_BUS_AHB);
+ if (!ab)
+ return -ENOMEM;
+
+ hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
+ switch (hw_rev) {
+ case ATH12K_HW_IPQ5332_HW10:
+ hif_ops = &ath12k_ahb_hif_ops_ipq5332;
+ userpd_id = ATH12K_IPQ5332_USERPD_ID;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err_core_free;
+ }
+
+ ab->hif.ops = hif_ops;
+ ab->pdev = pdev;
+ ab->hw_rev = hw_rev;
+ platform_set_drvdata(pdev, ab);
+ ab_ahb = ath12k_ab_to_ahb(ab);
+ ab_ahb->ab = ab;
+ ab_ahb->userpd_id = userpd_id;
+
+ /* Set fixed_mem_region to true for platforms that support fixed memory
+ * reservation from DT. If memory is reserved from DT for FW, the ath12k
+ * driver need not allocate memory.
+ */
+ if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
+ set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
+
+ ret = ath12k_core_pre_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath12k_ahb_resource_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath12k_hal_srng_init(ab);
+ if (ret)
+ goto err_resource_deinit;
+
+ ret = ath12k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath12k_ahb_init_qmi_ce_config(ab);
+
+ ret = ath12k_ahb_configure_rproc(ab);
+ if (ret)
+ goto err_ce_free;
+
+ ret = ath12k_ahb_config_irq(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_rproc_deconfigure;
+ }
+
+ ret = ath12k_core_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init core: %d\n", ret);
+ goto err_rproc_deconfigure;
+ }
+
+ return 0;
+
+err_rproc_deconfigure:
+ ath12k_ahb_deconfigure_rproc(ab);
+
+err_ce_free:
+ ath12k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath12k_hal_srng_deinit(ab);
+
+err_resource_deinit:
+ ath12k_ahb_resource_deinit(ab);
+
+err_core_free:
+ ath12k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void ath12k_ahb_remove_prepare(struct ath12k_base *ab)
+{
+ unsigned long left;
+
+ if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) {
+ left = wait_for_completion_timeout(&ab->driver_recovery,
+ ATH12K_AHB_RECOVERY_TIMEOUT);
+ if (!left)
+ ath12k_warn(ab, "failed to receive recovery response completion\n");
+ }
+
+ set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+ cancel_work_sync(&ab->qmi.event_work);
+}
+
+static void ath12k_ahb_free_resources(struct ath12k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+
+ ath12k_hal_srng_deinit(ab);
+ ath12k_ce_free_pipes(ab);
+ ath12k_ahb_resource_deinit(ab);
+ ath12k_ahb_deconfigure_rproc(ab);
+ ath12k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static void ath12k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath12k_base *ab = platform_get_drvdata(pdev);
+
+ if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath12k_ahb_power_down(ab, false);
+ goto qmi_fail;
+ }
+
+ ath12k_ahb_remove_prepare(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
+qmi_fail:
+ ath12k_core_deinit(ab);
+ ath12k_ahb_free_resources(ab);
+}
+
+static struct platform_driver ath12k_ahb_driver = {
+ .driver = {
+ .name = "ath12k_ahb",
+ .of_match_table = ath12k_ahb_of_match,
+ },
+ .probe = ath12k_ahb_probe,
+ .remove = ath12k_ahb_remove,
+};
+
+int ath12k_ahb_init(void)
+{
+ return platform_driver_register(&ath12k_ahb_driver);
+}
+
+void ath12k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath12k_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath12k/ahb.h b/drivers/net/wireless/ath/ath12k/ahb.h
new file mode 100644
index 000000000000..d56244b20a6a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ahb.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_AHB_H
+#define ATH12K_AHB_H
+
+#include <linux/clk.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include "core.h"
+
+#define ATH12K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH12K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH12K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH12K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+#define ATH12K_PCI_CE_WAKE_IRQ 2
+#define ATH12K_PCI_IRQ_CE0_OFFSET 3
+#define ATH12K_ROOTPD_READY_TIMEOUT (5 * HZ)
+#define ATH12K_RPROC_AFTER_POWERUP QCOM_SSR_AFTER_POWERUP
+#define ATH12K_AHB_FW_PREFIX "q6_fw"
+#define ATH12K_AHB_FW_SUFFIX ".mdt"
+#define ATH12K_AHB_FW2 "iu_fw.mdt"
+#define ATH12K_AHB_UPD_SWID 0x12
+#define ATH12K_USERPD_SPAWN_TIMEOUT (5 * HZ)
+#define ATH12K_USERPD_READY_TIMEOUT (10 * HZ)
+#define ATH12K_USERPD_STOP_TIMEOUT (5 * HZ)
+#define ATH12K_USERPD_ID_MASK GENMASK(9, 8)
+#define ATH12K_USERPD_FW_NAME_LEN 35
+
+enum ath12k_ahb_smp2p_msg_id {
+ ATH12K_AHB_POWER_SAVE_ENTER = 1,
+ ATH12K_AHB_POWER_SAVE_EXIT,
+};
+
+enum ath12k_ahb_userpd_irq {
+ ATH12K_USERPD_SPAWN_IRQ,
+ ATH12K_USERPD_READY_IRQ,
+ ATH12K_USERPD_STOP_ACK_IRQ,
+ ATH12K_USERPD_MAX_IRQ,
+};
+
+struct ath12k_base;
+
+struct ath12k_ahb {
+ struct ath12k_base *ab;
+ struct rproc *tgt_rproc;
+ struct clk *xo_clk;
+ struct completion rootpd_ready;
+ struct notifier_block root_pd_nb;
+ void *root_pd_notifier;
+ struct qcom_smem_state *spawn_state;
+ struct qcom_smem_state *stop_state;
+ struct completion userpd_spawned;
+ struct completion userpd_ready;
+ struct completion userpd_stopped;
+ u32 userpd_id;
+ u32 spawn_bit;
+ u32 stop_bit;
+ int userpd_irq_num[ATH12K_USERPD_MAX_IRQ];
+};
+
+static inline struct ath12k_ahb *ath12k_ab_to_ahb(struct ath12k_base *ab)
+{
+ return (struct ath12k_ahb *)ab->drv_priv;
+}
+
+#ifdef CONFIG_ATH12K_AHB
+int ath12k_ahb_init(void);
+void ath12k_ahb_exit(void);
+#else
+static inline int ath12k_ahb_init(void)
+{
+ return 0;
+}
+
+static inline void ath12k_ahb_exit(void) {}
+#endif
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index be0d669d31fc..47821a3b060b 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
@@ -219,6 +219,96 @@ const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
};
+const struct ce_attr ath12k_host_ce_config_ipq5332[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+ /* CE5: target -> host PKTLOG */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+ },
+ /* CE6: Target autonomous HIF_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE7: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE8: Target HIF memcpy (Generic HIF memcpy) */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE9: WMI logging/CFR/Spectral/Radar */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+ /* CE10: Unused */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+ /* CE11: Unused */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
@@ -343,11 +433,10 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
goto err;
}
+ /* Make sure descriptor is read after the head pointer. */
+ dma_rmb();
+
*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
- if (*nbytes == 0) {
- ret = -EIO;
- goto err;
- }
*skb = pipe->dest_ring->skb[sw_index];
pipe->dest_ring->skb[sw_index] = NULL;
@@ -380,8 +469,8 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
- if (unlikely(max_nbytes < nbytes)) {
- ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
+ ath12k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
index 1a14b9fb86b8..57f75899ee03 100644
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CE_H
@@ -39,8 +39,8 @@
#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
/* CE address/mask */
-#define CE_HOST_IE_ADDRESS 0x00A1803C
-#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_ADDRESS 0x75804C
+#define CE_HOST_IE_2_ADDRESS 0x758050
#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
#define CE_HOST_IE_3_SHIFT 0xC
@@ -76,6 +76,17 @@ struct ce_pipe_config {
__le32 reserved;
};
+struct ce_ie_addr {
+ u32 ie1_reg_addr;
+ u32 ie2_reg_addr;
+ u32 ie3_reg_addr;
+};
+
+struct ce_remap {
+ u32 base;
+ u32 size;
+};
+
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
@@ -164,6 +175,7 @@ struct ath12k_ce {
extern const struct ce_attr ath12k_host_ce_config_qcn9274[];
extern const struct ce_attr ath12k_host_ce_config_wcn7850[];
+extern const struct ce_attr ath12k_host_ce_config_ipq5332[];
void ath12k_ce_cleanup_pipes(struct ath12k_base *ab);
void ath12k_ce_rx_replenish_retry(struct timer_list *t);
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 0b2dec081c6e..31d851d8e688 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -10,15 +10,18 @@
#include <linux/firmware.h>
#include <linux/of.h>
#include <linux/of_graph.h>
+#include "ahb.h"
#include "core.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
-#include "hif.h"
-#include "fw.h"
#include "debugfs.h"
+#include "fw.h"
+#include "hif.h"
+#include "pci.h"
#include "wow.h"
+static int ahb_err, pci_err;
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
@@ -612,9 +615,74 @@ u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
return TARGET_NUM_TIDS(SINGLE);
}
+struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
+ int index)
+{
+ struct device *dev = ab->dev;
+ struct reserved_mem *rmem;
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", index);
+ if (!node) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to parse memory-region for index %d\n", index);
+ return NULL;
+ }
+
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "unable to get memory-region for index %d\n", index);
+ return NULL;
+ }
+
+ return rmem;
+}
+
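+/* Track how many devices of a hw group have started: _get takes a start
+ * reference (bumping ag->num_started), _put drops it; ab->hw_group_ref
+ * guards against double get/put for the same device. Both require
+ * ag->mutex to be held.
+ */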
+static inline
+void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (ab->hw_group_ref) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
+ ag->id);
+ return;
+ }
+
+ ab->hw_group_ref = true;
+ ag->num_started++;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
+ ag->id, ag->num_started);
+}
+
+static inline
+void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (!ab->hw_group_ref) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
+ ag->id);
+ return;
+ }
+
+ ab->hw_group_ref = false;
+ ag->num_started--;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
+ ag->id, ag->num_started);
+}
+
static void ath12k_core_stop(struct ath12k_base *ab)
{
- ath12k_core_stopped(ab);
+ ath12k_core_to_group_ref_put(ab);
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
@@ -629,7 +697,7 @@ static void ath12k_core_stop(struct ath12k_base *ab)
/* De-Init of components as needed */
}
-static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
+static void ath12k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
{
struct ath12k_base *ab = data;
const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
@@ -651,6 +719,28 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
return;
}
+ spin_lock_bh(&ab->base_lock);
+
+ switch (smbios->country_code_flag) {
+ case ATH12K_SMBIOS_CC_ISO:
+ ab->new_alpha2[0] = u16_get_bits(smbios->cc_code >> 8, 0xff);
+ ab->new_alpha2[1] = u16_get_bits(smbios->cc_code, 0xff);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios cc_code %c%c\n",
+ ab->new_alpha2[0], ab->new_alpha2[1]);
+ break;
+ case ATH12K_SMBIOS_CC_WW:
+ ab->new_alpha2[0] = '0';
+ ab->new_alpha2[1] = '0';
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot smbios worldwide regdomain\n");
+ break;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot ignore smbios country code setting %d\n",
+ smbios->country_code_flag);
+ break;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
if (!smbios->bdf_enabled) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
return;
@@ -690,7 +780,7 @@ static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
int ath12k_core_check_smbios(struct ath12k_base *ab)
{
ab->qmi.target.bdf_ext[0] = '\0';
- dmi_walk(ath12k_core_check_bdfext, ab);
+ dmi_walk(ath12k_core_check_cc_code_bdfext, ab);
if (ab->qmi.target.bdf_ext[0] == '\0')
return -ENODATA;
@@ -721,6 +811,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
goto err_qmi_deinit;
}
+ ath12k_debugfs_pdev_create(ab);
+
return 0;
err_qmi_deinit:
@@ -851,9 +943,8 @@ static int ath12k_core_start(struct ath12k_base *ab)
ath12k_acpi_set_dsm_func(ab);
- if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
- /* Indicate the core start in the appropriate group */
- ath12k_core_started(ab);
+ /* Indicate the core start in the appropriate group */
+ ath12k_core_to_group_ref_get(ab);
return 0;
@@ -891,6 +982,9 @@ static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
ab = ag->ab[i];
if (!ab)
continue;
+
+ clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
ath12k_core_device_cleanup(ab);
}
@@ -1026,6 +1120,8 @@ core_pdev_create:
mutex_lock(&ab->core_lock);
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
ret = ath12k_core_pdev_create(ab);
if (ret) {
ath12k_err(ab, "failed to create pdev core %d\n", ret);
@@ -1084,6 +1180,59 @@ bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
return (ag->num_started == ag->num_devices);
}
+static void ath12k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath12k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath12k_fw_stats_init(struct ath12k *ar)
+{
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+ init_completion(&ar->fw_stats_complete);
+}
+
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
+{
+ ath12k_fw_stats_pdevs_free(&stats->pdevs);
+ ath12k_fw_stats_vdevs_free(&stats->vdevs);
+ ath12k_fw_stats_bcn_free(&stats->bcn);
+}
+
+void ath12k_fw_stats_reset(struct ath12k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_stats.fw_stats_done = false;
+ ath12k_fw_stats_free(&ar->fw_stats);
+ spin_unlock_bh(&ar->data_lock);
+}
+
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
{
struct ath12k_hw_group *ag = ab->ag;
@@ -1246,6 +1395,7 @@ static void ath12k_rfkill_work(struct work_struct *work)
void ath12k_core_halt(struct ath12k *ar)
{
+ struct list_head *pos, *n;
struct ath12k_base *ab = ar->ab;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -1258,10 +1408,16 @@ void ath12k_core_halt(struct ath12k *ar)
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->rfkill_work);
+ cancel_work_sync(&ab->update_11d_work);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
synchronize_rcu();
- INIT_LIST_HEAD(&ar->arvifs);
+
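+ /* Detach each arvif with list_del_init() under data_lock rather than
+ * re-initialising the list head, so every entry's node stays
+ * self-consistent for code still holding a pointer to it.
+ */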
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_safe(pos, n, &ar->arvifs)
+ list_del_init(pos);
+ spin_unlock_bh(&ar->data_lock);
+
idr_init(&ar->txmgmt_idr);
}
@@ -1285,14 +1441,28 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
ah->state == ATH12K_HW_STATE_TM)
continue;
+ wiphy_lock(ah->hw->wiphy);
+
+ /* If queue 0 is stopped, it is safe to assume that all
+ * other queues were already stopped by the driver via
+ * ieee80211_stop_queues() below, so there is no need to
+ * stop them again; hence continue.
+ */
+ if (ieee80211_queue_stopped(ah->hw, 0)) {
+ wiphy_unlock(ah->hw->wiphy);
+ continue;
+ }
+
ieee80211_stop_queues(ah->hw);
for (j = 0; j < ah->num_radio; j++) {
ar = &ah->radio[j];
ath12k_mac_drain_tx(ar);
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
@@ -1306,13 +1476,47 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
wake_up(&ar->txmgmt_empty_waitq);
+
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ ar->monitor_started = false;
}
+
+ wiphy_unlock(ah->hw->wiphy);
}
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
}
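+/* Propagate the country code staged in ab->new_alpha2 (e.g. parsed from
+ * SMBIOS above) to every pdev via the WMI set-current-country command.
+ */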
+static void ath12k_update_11d(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ struct wmi_set_current_country_arg arg = {};
+ int ret, i;
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&arg.alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
+ arg.alpha2[0], arg.alpha2[1]);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ memcpy(&ar->alpha2, &arg.alpha2, 2);
+ ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "pdev id %d failed set current country code: %d\n",
+ i, ret);
+ }
+}
+
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k_hw_group *ag = ab->ag;
@@ -1386,19 +1590,30 @@ static void ath12k_core_restart(struct work_struct *work)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
}
+ mutex_lock(&ag->mutex);
+
+ if (!ath12k_core_hw_group_start_ready(ag)) {
+ mutex_unlock(&ag->mutex);
+ goto exit_restart;
+ }
+
for (i = 0; i < ag->num_hw; i++) {
- ah = ath12k_ag_to_ah(ab->ag, i);
+ ah = ath12k_ag_to_ah(ag, i);
ieee80211_restart_hw(ah->hw);
}
+
+ mutex_unlock(&ag->mutex);
}
+exit_restart:
complete(&ab->restart_completed);
}
static void ath12k_core_reset(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
- int reset_count, fail_cont_count;
+ struct ath12k_hw_group *ag = ab->ag;
+ int reset_count, fail_cont_count, i;
long time_left;
if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
@@ -1457,9 +1672,34 @@ static void ath12k_core_reset(struct work_struct *work)
ath12k_hif_ce_irq_disable(ab);
ath12k_hif_power_down(ab, false);
- ath12k_hif_power_up(ab);
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+ /* prepare for power up */
+ ab->qmi.num_radios = U8_MAX;
+
+ mutex_lock(&ag->mutex);
+ ath12k_core_to_group_ref_put(ab);
+
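+ /* Only the last device of the group to pass through reset performs
+ * the group-wide power up below; earlier devices just drop their
+ * start reference and wait for the final partner.
+ */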
+ if (ag->num_started > 0) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "waiting for %d partner device(s) to reset\n",
+ ag->num_started);
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ /* Prepare MLO global memory region for power up */
+ ath12k_qmi_reset_mlo_mem(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_hif_power_up(ab);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+ }
+
+ mutex_unlock(&ag->mutex);
}
int ath12k_core_pre_init(struct ath12k_base *ab)
@@ -1596,9 +1836,9 @@ static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
of_node_put(next_rx_endpoint);
device_count++;
- if (device_count > ATH12K_MAX_SOCS) {
+ if (device_count > ATH12K_MAX_DEVICES) {
ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
- device_count, ATH12K_MAX_SOCS);
+ device_count, ATH12K_MAX_DEVICES);
of_node_put(next_wsi_dev);
return -EINVAL;
}
@@ -1774,7 +2014,7 @@ static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
}
}
-static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
struct ath12k_base *ab;
int i;
@@ -1843,20 +2083,18 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
lockdep_assert_held(&ag->mutex);
- /* If more than one devices are grouped, then inter MLO
- * functionality can work still independent of whether internally
- * each device supports single_chip_mlo or not.
- * Only when there is one device, then disable for WCN chipsets
- * till the required driver implementation is in place.
- */
if (ag->num_devices == 1) {
ab = ag->ab[0];
-
- /* WCN chipsets does not advertise in firmware features
- * hence skip checking
- */
- if (ab->hw_params->def_num_link)
+ /* QCN9274 firmware uses firmware IE for MLO advertisement */
+ if (ab->fw.fw_features_valid) {
+ ag->mlo_capable =
+ ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
return;
+ }
+
+ /* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
+ ag->mlo_capable = ab->single_chip_mlo_support;
+ return;
}
ag->mlo_capable = true;
@@ -1869,7 +2107,7 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
/* even if 1 device's firmware feature indicates MLO
* unsupported, make MLO unsupported for the whole group
*/
- if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
+ if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
ag->mlo_capable = false;
return;
}
@@ -1922,10 +2160,9 @@ err:
void ath12k_core_deinit(struct ath12k_base *ab)
{
- ath12k_core_panic_notifier_unregister(ab);
- ath12k_core_hw_group_cleanup(ab->ag);
ath12k_core_hw_group_destroy(ab->ag);
ath12k_core_hw_group_unassign(ab);
+ ath12k_core_panic_notifier_unregister(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1966,6 +2203,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->reset_work, ath12k_core_reset);
INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
+ INIT_WORK(&ab->update_11d_work, ath12k_update_11d);
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
@@ -1975,6 +2213,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
+ ab->single_chip_mlo_support = false;
/* Device index used to identify the devices in a group.
*
@@ -1995,5 +2234,31 @@ err_sc_free:
return NULL;
}
-MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
+static int ath12k_init(void)
+{
+ ahb_err = ath12k_ahb_init();
+ if (ahb_err)
+ pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
+
+ pci_err = ath12k_pci_init();
+ if (pci_err)
+ pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
+
+ /* If both failed, return one of the failures (arbitrary) */
+ return ahb_err && pci_err ? ahb_err : 0;
+}
+
+static void ath12k_exit(void)
+{
+ if (!pci_err)
+ ath12k_pci_exit();
+
+ if (!ahb_err)
+ ath12k_ahb_exit();
+}
+
+module_init(ath12k_init);
+module_exit(ath12k_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 3fac4f00d383..941db6e49d6e 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -14,8 +14,10 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/of_reserved_mem.h>
#include <linux/panic_notifier.h>
#include <linux/average.h>
+#include <linux/of.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -62,8 +64,8 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
-#define ATH12K_MAX_SOCS 3
-#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
+#define ATH12K_MAX_DEVICES 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)
#define ATH12K_INVALID_GROUP_ID 0xFF
#define ATH12K_INVALID_DEVICE_ID 0xFF
@@ -147,7 +149,8 @@ struct ath12k_skb_rxcb {
enum ath12k_hw_rev {
ATH12K_HW_QCN9274_HW10,
ATH12K_HW_QCN9274_HW20,
- ATH12K_HW_WCN7850_HW20
+ ATH12K_HW_WCN7850_HW20,
+ ATH12K_HW_IPQ5332_HW10,
};
enum ath12k_firmware_mode {
@@ -160,6 +163,7 @@ enum ath12k_firmware_mode {
#define ATH12K_IRQ_NUM_MAX 57
#define ATH12K_EXT_IRQ_NUM_MAX 16
+#define ATH12K_MAX_TCL_RING_NUM 3
struct ath12k_ext_irq_grp {
struct ath12k_base *ab;
@@ -172,9 +176,34 @@ struct ath12k_ext_irq_grp {
struct net_device *napi_ndev;
};
+enum ath12k_smbios_cc_type {
+ /* disable country code setting from SMBIOS */
+ ATH12K_SMBIOS_CC_DISABLE = 0,
+
+ /* set country code by ANSI country name, based on ISO3166-1 alpha2 */
+ ATH12K_SMBIOS_CC_ISO = 1,
+
+ /* worldwide regdomain */
+ ATH12K_SMBIOS_CC_WW = 2,
+};
+
struct ath12k_smbios_bdf {
struct dmi_header hdr;
- u32 padding;
+ u8 features_disabled;
+
+ /* enum ath12k_smbios_cc_type */
+ u8 country_code_flag;
+
+ /* To set a specific country, first set country_code_flag to
+ * ATH12K_SMBIOS_CC_ISO, then store the ISO3166-1 alpha2 code in
+ * cc_code: e.g. United States = 0x5553 ("US", 'U' = 0x55,
+ * 'S' = 0x53), Indonesia = 0x4944 ("ID", 'I' = 0x49, 'D' = 0x44).
+ * If country_code_flag is ATH12K_SMBIOS_CC_WW, the worldwide
+ * regulatory setting is used instead.
+ */
+ u16 cc_code;
+
u8 bdf_enabled;
u8 bdf_ext[];
} __packed;
@@ -219,6 +248,12 @@ enum ath12k_scan_state {
ATH12K_SCAN_ABORTING,
};
+enum ath12k_11d_state {
+ ATH12K_11D_IDLE,
+ ATH12K_11D_PREPARING,
+ ATH12K_11D_RUNNING,
+};
+
enum ath12k_hw_group_flags {
ATH12K_GROUP_FLAG_REGISTERED,
ATH12K_GROUP_FLAG_UNREGISTER,
@@ -238,6 +273,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_EXT_IRQ_ENABLED,
ATH12K_FLAG_QMI_FW_READY_COMPLETE,
ATH12K_FLAG_FTM_SEGMENTED,
+ ATH12K_FLAG_FIXED_MEM_REGION,
};
struct ath12k_tx_conf {
@@ -295,14 +331,20 @@ struct ath12k_link_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
- struct ieee80211_chanctx_conf chanctx;
u8 vdev_stats_id;
u32 punct_bitmap;
u8 link_id;
struct ath12k_vif *ahvif;
struct ath12k_rekey_data rekey_data;
+ struct ath12k_link_stats link_stats;
+ spinlock_t link_stats_lock; /* Protects updates to link_stats */
u8 current_cntdown_counter;
+
+ /* only used in station mode */
+ bool is_sta_assoc_link;
+
+ struct ath12k_reg_tpc_power_info reg_tpc_info;
};
struct ath12k_vif {
@@ -364,6 +406,8 @@ struct ath12k_vif_iter {
#define HAL_RX_MAX_NSS 8
#define HAL_RX_MAX_NUM_LEGACY_RATES 12
+#define ATH12K_SCAN_TIMEOUT_HZ (20 * HZ)
+
struct ath12k_rx_peer_rate_stats {
u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
@@ -519,6 +563,12 @@ struct ath12k_link_sta {
u8 link_idx;
};
+struct ath12k_reoq_buf {
+ void *vaddr;
+ dma_addr_t paddr_aligned;
+ u32 size;
+};
+
struct ath12k_sta {
struct ath12k_vif *ahvif;
enum hal_pn_type pn_type;
@@ -531,13 +581,25 @@ struct ath12k_sta {
u8 num_peer;
enum ieee80211_sta_state state;
+
+ struct ath12k_reoq_buf reoq_bufs[IEEE80211_NUM_TIDS + 1];
};
-#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5925
-#define ATH12K_MAX_6G_FREQ 7115
+#define ATH12K_HALF_20MHZ_BW 10
+#define ATH12K_2GHZ_MIN_CENTER 2412
+#define ATH12K_2GHZ_MAX_CENTER 2484
+#define ATH12K_5GHZ_MIN_CENTER 4900
+#define ATH12K_5GHZ_MAX_CENTER 5920
+#define ATH12K_6GHZ_MIN_CENTER 5935
+#define ATH12K_6GHZ_MAX_CENTER 7115
+#define ATH12K_MIN_2GHZ_FREQ (ATH12K_2GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW - 1)
+#define ATH12K_MAX_2GHZ_FREQ (ATH12K_2GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW + 1)
+#define ATH12K_MIN_5GHZ_FREQ (ATH12K_5GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_5GHZ_FREQ (ATH12K_5GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MIN_6GHZ_FREQ (ATH12K_6GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_6GHZ_FREQ (ATH12K_6GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
#define ATH12K_NUM_CHANS 101
-#define ATH12K_MAX_5G_CHAN 173
+#define ATH12K_MAX_5GHZ_CHAN 173
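Expanding the band-edge macros above gives the guard frequencies in MHz; compile-time checks of this sort would pin the arithmetic down (a sketch, not part of the patch):

/* 2412 - 10 - 1 = 2401, 2484 + 10 + 1 = 2495 */
static_assert(ATH12K_MIN_2GHZ_FREQ == 2401);
static_assert(ATH12K_MAX_2GHZ_FREQ == 2495);
/* 4900 - 10 = 4890, 5920 + 10 = 5930 */
static_assert(ATH12K_MIN_5GHZ_FREQ == 4890);
static_assert(ATH12K_MAX_5GHZ_FREQ == 5930);
/* 5935 - 10 = 5925, 7115 + 10 = 7125 */
static_assert(ATH12K_MIN_6GHZ_FREQ == 5925);
static_assert(ATH12K_MAX_6GHZ_FREQ == 7125);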
enum ath12k_hw_state {
ATH12K_HW_STATE_OFF,
@@ -728,7 +790,6 @@ struct ath12k {
#endif
bool dfs_block_radar_events;
- bool monitor_conf_enabled;
bool monitor_vdev_created;
bool monitor_started;
int monitor_vdev_id;
@@ -737,12 +798,22 @@ struct ath12k {
bool nlo_enabled;
+ /* Protected by wiphy::mtx lock. */
+ u32 vdev_id_11d_scan;
+ struct completion completed_11d_scan;
+ enum ath12k_11d_state state_11d;
+ u8 alpha2[REG_ALPHA2_LEN];
+ bool regdom_set_by_user;
+
struct completion fw_stats_complete;
struct completion mlo_setup_done;
u32 mlo_setup_status;
u8 ftm_msgref;
struct ath12k_fw_stats fw_stats;
+ unsigned long last_tx_power_update;
+
+ s8 max_allowed_tx_power;
};
struct ath12k_hw {
@@ -834,7 +905,7 @@ struct ath12k_board_data {
size_t len;
};
-struct ath12k_soc_dp_tx_err_stats {
+struct ath12k_device_dp_tx_err_stats {
/* TCL Ring Descriptor unavailable */
u32 desc_na[DP_TCL_NUM_RING_MAX];
/* Other failures during dp_tx due to mem allocation failure
@@ -843,13 +914,25 @@ struct ath12k_soc_dp_tx_err_stats {
atomic_t misc_fail;
};
-struct ath12k_soc_dp_stats {
+struct ath12k_device_dp_stats {
u32 err_ring_pkts;
u32 invalid_rbm;
u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
u32 hal_reo_error[DP_REO_DST_RING_MAX];
- struct ath12k_soc_dp_tx_err_stats tx_err;
+ struct ath12k_device_dp_tx_err_stats tx_err;
+ u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES];
+ u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES];
+ u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON];
+ u32 fw_tx_status[MAX_FW_TX_STATUS];
+ u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX];
+ u32 tx_enqueued[DP_TCL_NUM_RING_MAX];
+ u32 tx_completed[DP_TCL_NUM_RING_MAX];
+};
+
+struct ath12k_reg_freq {
+ u32 start_freq;
+ u32 end_freq;
};
struct ath12k_mlo_memory {
@@ -873,7 +956,7 @@ struct ath12k_hw_group {
u8 num_probed;
u8 num_started;
unsigned long flags;
- struct ath12k_base *ab[ATH12K_MAX_SOCS];
+ struct ath12k_base *ab[ATH12K_MAX_DEVICES];
/* protects access to this struct */
struct mutex mutex;
@@ -887,7 +970,7 @@ struct ath12k_hw_group {
struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
u8 num_hw;
bool mlo_capable;
- struct device_node *wsi_node[ATH12K_MAX_SOCS];
+ struct device_node *wsi_node[ATH12K_MAX_DEVICES];
struct ath12k_mlo_memory mlo_mem;
struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
bool hw_link_id_init_done;
@@ -923,6 +1006,10 @@ struct ath12k_base {
void __iomem *mem;
unsigned long mem_len;
+ void __iomem *mem_ce;
+ u32 ce_remap_base_addr;
+ bool ce_remap;
+
struct {
enum ath12k_bus bus;
const struct ath12k_hif_ops *ops;
@@ -991,9 +1078,11 @@ struct ath12k_base {
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct ath12k_reg_info *reg_info[MAX_RADIOS];
+
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
- struct ath12k_soc_dp_stats soc_stats;
+ struct ath12k_device_dp_stats device_stats;
#ifdef CONFIG_ATH12K_DEBUGFS
struct dentry *debugfs_soc;
#endif
@@ -1011,6 +1100,8 @@ struct ath12k_base {
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
+ struct work_struct update_11d_work;
+ u8 new_alpha2[2];
struct {
/* protected by data_lock */
u32 fw_crash_counter;
@@ -1020,8 +1111,6 @@ struct ath12k_base {
struct ath12k_dbring_cap *db_caps;
u32 num_db_cap;
- struct timer_list mon_reap_timer;
-
struct completion htc_suspend;
u64 fw_soc_drop_count;
@@ -1051,6 +1140,7 @@ struct ath12k_base {
size_t m3_len;
DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
+ bool fw_features_valid;
} fw;
const struct hal_rx_ops *hal_rx_ops;
@@ -1087,6 +1177,14 @@ struct ath12k_base {
struct ath12k_wsi_info wsi_info;
enum ath12k_firmware_mode fw_mode;
struct ath12k_ftm_event_obj ftm_event_obj;
+ bool hw_group_ref;
+
+ /* Denote whether MLO is possible within the device */
+ bool single_chip_mlo_support;
+
+ struct ath12k_reg_freq reg_freq_2ghz;
+ struct ath12k_reg_freq reg_freq_5ghz;
+ struct ath12k_reg_freq reg_freq_6ghz;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@@ -1185,6 +1283,7 @@ struct ath12k_fw_stats_pdev {
};
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
+void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
void ath12k_core_deinit(struct ath12k_base *ath12k);
@@ -1215,6 +1314,12 @@ u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
+void ath12k_fw_stats_init(struct ath12k *ar);
+void ath12k_fw_stats_bcn_free(struct list_head *head);
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats);
+void ath12k_fw_stats_reset(struct ath12k *ar);
+struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
+ int index);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
@@ -1275,8 +1380,16 @@ static inline void ath12k_core_create_firmware_path(struct ath12k_base *ab,
const char *filename,
void *buf, size_t buf_len)
{
- snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
- ab->hw_params->fw.dir, filename);
+ const char *fw_name = NULL;
+
+ of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name);
+
+ if (fw_name && strncmp(filename, "board", 5))
+ snprintf(buf, buf_len, "%s/%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, fw_name, filename);
+ else
+ snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, filename);
}
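A userspace model of the branch above: non-board firmware is looked up under an optional per-device subdirectory taken from the "firmware-name" DT property, while board-*.bin always comes from the stock directory. The hw directory and property value below are illustrative assumptions:

#include <stdio.h>
#include <string.h>

static void build_fw_path(char *buf, size_t len, const char *fw_name,
                          const char *filename)
{
        /* mirrors the strncmp(filename, "board", 5) filter above */
        if (fw_name && strncmp(filename, "board", 5))
                snprintf(buf, len, "ath12k/EXAMPLE/hw1.0/%s/%s", fw_name, filename);
        else
                snprintf(buf, len, "ath12k/EXAMPLE/hw1.0/%s", filename);
}

/* build_fw_path(buf, sizeof(buf), "vendor-x", "amss.bin")
 *   -> "ath12k/EXAMPLE/hw1.0/vendor-x/amss.bin"
 * build_fw_path(buf, sizeof(buf), "vendor-x", "board-2.bin")
 *   -> "ath12k/EXAMPLE/hw1.0/board-2.bin"
 */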
static inline const char *ath12k_bus_str(enum ath12k_bus bus)
@@ -1284,6 +1397,8 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus)
switch (bus) {
case ATH12K_BUS_PCI:
return "pci";
+ case ATH12K_BUS_AHB:
+ return "ahb";
}
return "unknown";
@@ -1333,20 +1448,6 @@ static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
return ab->ag;
}
-static inline void ath12k_core_started(struct ath12k_base *ab)
-{
- lockdep_assert_held(&ab->ag->mutex);
-
- ab->ag->num_started++;
-}
-
-static inline void ath12k_core_stopped(struct ath12k_base *ab)
-{
- lockdep_assert_held(&ab->ag->mutex);
-
- ab->ag->num_started--;
-}
-
static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
u8 device_id)
{
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index 57002215ddf1..dd624d73b8b2 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -33,6 +33,76 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
+static ssize_t ath12k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t
+ath12k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k_base *ab = file->private_data;
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar = NULL;
+ char buf[32] = {0};
+ int i, ret;
+ ssize_t rc;
+
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar)
+ break;
+ }
+
+ if (!ar)
+ return -ENETDOWN;
+
+ if (!strcmp(buf, "assert")) {
+ ath12k_info(ab, "simulating firmware assert crash\n");
+ ret = ath12k_wmi_force_fw_hang_cmd(ar,
+ ATH12K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH12K_WMI_FW_HANG_DELAY);
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret) {
+ ath12k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath12k_read_simulate_fw_crash,
+ .write = ath12k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
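For reference, the knob is exercised from userspace roughly as follows; the device directory name follows the "<bus>-<dev_name>" convention used by ath12k_debugfs_soc_create() later in this patch, and the concrete path is only an example:

/* echo assert > /sys/kernel/debug/ath12k/pci-0000:06:00.0/simulate_fw_crash */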
+
static ssize_t ath12k_write_tpc_stats_type(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -88,8 +158,8 @@ static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_arg *tpc_stats,
u32 chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
u8 band;
- band = ((chan_freq > ATH12K_MIN_6G_FREQ) ? NL80211_BAND_6GHZ :
- ((chan_freq > ATH12K_MIN_5G_FREQ) ? NL80211_BAND_5GHZ :
+ band = ((chan_freq > ATH12K_MIN_6GHZ_FREQ) ? NL80211_BAND_6GHZ :
+ ((chan_freq > ATH12K_MIN_5GHZ_FREQ) ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ));
if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
@@ -833,137 +903,352 @@ static const struct file_operations fops_extd_rx_stats = {
.open = simple_open,
};
-void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+static int ath12k_open_link_stats(struct inode *inode, struct file *file)
{
- bool dput_needed;
- char soc_name[64] = { 0 };
- struct dentry *debugfs_ath12k;
+ struct ath12k_vif *ahvif = inode->i_private;
+ size_t len = 0, buf_len = (PAGE_SIZE * 2);
+ struct ath12k_link_stats linkstat;
+ struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ struct wiphy *wiphy;
+ int link_id, i;
+ char *buf;
- debugfs_ath12k = debugfs_lookup("ath12k", NULL);
- if (debugfs_ath12k) {
- /* a dentry from lookup() needs dput() after we don't use it */
- dput_needed = true;
- } else {
- debugfs_ath12k = debugfs_create_dir("ath12k", NULL);
- if (IS_ERR_OR_NULL(debugfs_ath12k))
- return;
- dput_needed = false;
+ if (!ahvif)
+ return -EINVAL;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wiphy = ahvif->ah->hw->wiphy;
+ wiphy_lock(wiphy);
+
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = rcu_dereference_protected(ahvif->link[link_id],
+ lockdep_is_held(&wiphy->mtx));
+
+ spin_lock_bh(&arvif->link_stats_lock);
+ linkstat = arvif->link_stats;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Unicast Frames Enqueued = %d\n",
+ link_id, linkstat.tx_enqueued);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Broadcast Frames Enqueued = %d\n",
+ link_id, linkstat.tx_bcast_mcast);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frames Completed = %d\n",
+ link_id, linkstat.tx_completed);
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frames Dropped = %d\n",
+ link_id, linkstat.tx_dropped);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "link[%d] Tx Frame descriptor Encap Type = ",
+ link_id);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " raw:%d",
+ linkstat.tx_encap_type[0]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " native_wifi:%d",
+ linkstat.tx_encap_type[1]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " ethernet:%d",
+ linkstat.tx_encap_type[2]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\nlink[%d] Tx Frame descriptor Encrypt Type = ",
+ link_id);
+
+ for (i = 0; i < HAL_ENCRYPT_TYPE_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ " %d:%d", i,
+ linkstat.tx_encrypt_type[i]);
+ }
+ len += scnprintf(buf + len, buf_len - len,
+ "\nlink[%d] Tx Frame descriptor Type = buffer:%d extension:%d\n",
+ link_id, linkstat.tx_desc_type[0],
+ linkstat.tx_desc_type[1]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "------------------------------------------------------\n");
}
- scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus),
- dev_name(ab->dev));
+ wiphy_unlock(wiphy);
- ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k);
+ file->private_data = buf;
- if (dput_needed)
- dput(debugfs_ath12k);
+ return 0;
}
-void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+static int ath12k_release_link_stats(struct inode *inode, struct file *file)
{
- debugfs_remove_recursive(ab->debugfs_soc);
- ab->debugfs_soc = NULL;
- /* We are not removing ath12k directory on purpose, even if it
- * would be empty. This simplifies the directory handling and it's
- * a minor cosmetic issue to leave an empty ath12k directory to
- * debugfs.
- */
+ kfree(file->private_data);
+ return 0;
}
-static void ath12k_fw_stats_pdevs_free(struct list_head *head)
+static ssize_t ath12k_read_link_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct ath12k_fw_stats_pdev *i, *tmp;
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static void ath12k_fw_stats_bcn_free(struct list_head *head)
+static const struct file_operations ath12k_fops_link_stats = {
+ .open = ath12k_open_link_stats,
+ .release = ath12k_release_link_stats,
+ .read = ath12k_read_link_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
- struct ath12k_fw_stats_bcn *i, *tmp;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
+ debugfs_create_file("link_stats", 0400, vif->debugfs_dir, ahvif,
+ &ath12k_fops_link_stats);
}
-static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+static ssize_t ath12k_debugfs_dump_device_dp_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct ath12k_fw_stats_vdev *i, *tmp;
+ struct ath12k_base *ab = file->private_data;
+ struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
+ int len = 0, i, j, ret;
+ struct ath12k *ar;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR] = "Overflow",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR] = "MPDU len",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR] = "FCS",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR] = "Decrypt",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR] = "TKIP MIC",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR] = "Unencrypt",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR] = "MSDU len",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR] = "MSDU limit",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR] = "WiFi parse",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR] = "AMSDU parse",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR] = "SA timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR] = "DA timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR] = "Flow timeout",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR] = "Flush req",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR] = "AMSDU frag",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR] = "Multicast echo",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR] = "AMSDU mismatch",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR] = "Unauth WDS",
+ [HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR] = "AMSDU or WDS"};
+
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO] = "Desc addr zero",
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID] = "Desc inval",
+ [HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA] = "AMPDU in non BA",
+ [HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE] = "Non BA dup",
+ [HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE] = "BA dup",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP] = "Frame 2k jump",
+ [HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP] = "BAR 2k jump",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR] = "Frame OOR",
+ [HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR] = "BAR OOR",
+ [HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION] = "No BA session",
+ [HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN] = "Frame SN equal SSN",
+ [HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED] = "PN check fail",
+ [HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET] = "2k err",
+ [HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET] = "PN err",
+ [HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED] = "Desc blocked"};
+
+ static const char *wbm_rel_src[HAL_WBM_REL_SRC_MODULE_MAX] = {
+ [HAL_WBM_REL_SRC_MODULE_TQM] = "TQM",
+ [HAL_WBM_REL_SRC_MODULE_RXDMA] = "Rxdma",
+ [HAL_WBM_REL_SRC_MODULE_REO] = "Reo",
+ [HAL_WBM_REL_SRC_MODULE_FW] = "FW",
+ [HAL_WBM_REL_SRC_MODULE_SW] = "SW"};
+
+ char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
+ if (!buf)
+ return -ENOMEM;
-void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
-{
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats.fw_stats_done = false;
- ath12k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
- ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
- ath12k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- spin_unlock_bh(&ar->data_lock);
-}
+ len += scnprintf(buf + len, size - len, "DEVICE RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ device_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ device_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
-static int ath12k_debugfs_fw_stats_request(struct ath12k *ar,
- struct ath12k_fw_stats_req_params *param)
-{
- struct ath12k_base *ab = ar->ab;
- unsigned long timeout, time_left;
- int ret;
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], device_stats->rxdma_error[i]);
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + msecs_to_jiffies(3 * 1000);
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], device_stats->reo_error[i]);
- ath12k_debugfs_fw_stats_reset(ar);
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
- reinit_completion(&ar->fw_stats_complete);
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len,
+ "ring%d: %u\n", i,
+ device_stats->hal_reo_error[i]);
- ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
- param->vdev_id, param->pdev_id);
+ len += scnprintf(buf + len, size - len, "\nDEVICE TX STATS:\n");
+ len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
- if (ret) {
- ath12k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+ i, device_stats->tx_err.desc_na[i]);
+
+ len += scnprintf(buf + len, size - len,
+ "\nMisc Transmit Failures: %d\n",
+ atomic_read(&device_stats->tx_err.misc_fail));
+
+ len += scnprintf(buf + len, size - len, "\ntx_wbm_rel_source:");
+
+ for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tx_wbm_rel_source[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\ntqm_rel_reason:");
+
+ for (i = 0; i < MAX_TQM_RELEASE_REASON; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tqm_rel_reason[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\nfw_tx_status:");
+
+ for (i = 0; i < MAX_FW_TX_STATUS; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->fw_tx_status[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\ntx_enqueued:");
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u", i,
+ device_stats->tx_enqueued[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\ntx_completed:");
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, " %d:%u",
+ i, device_stats->tx_completed[i]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, DP_SW2HW_MACID(i));
+ if (ar) {
+ len += scnprintf(buf + len, size - len,
+ "\nradio%d tx_pending: %u\n", i,
+ atomic_read(&ar->dp.num_tx_pending));
+ }
}
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
- 1 * HZ);
- /* If the wait timed out, return -ETIMEDOUT */
- if (!time_left)
- return -ETIMEDOUT;
+ len += scnprintf(buf + len, size - len, "\nREO Rx Received:\n");
- /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
- * when stats data buffer limit is reached. fw_stats_complete
- * is completed once host receives first event from firmware, but
- * still end might not be marked in the TLV.
- * Below loop is to confirm that firmware completed sending all the event
- * and fw_stats_done is marked true when end is marked in the TLV
- */
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ len += scnprintf(buf + len, size - len, "Ring%d:", i + 1);
- spin_lock_bh(&ar->data_lock);
- if (ar->fw_stats.fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
+ for (j = 0; j < ATH12K_MAX_DEVICES; j++) {
+ len += scnprintf(buf + len, size - len,
+ "\t%d:%u", j,
+ device_stats->reo_rx[i][j]);
}
- spin_unlock_bh(&ar->data_lock);
+
+ len += scnprintf(buf + len, size - len, "\n");
}
- return 0;
+
+ len += scnprintf(buf + len, size - len, "\nRx WBM REL SRC Errors:\n");
+
+ for (i = 0; i < HAL_WBM_REL_SRC_MODULE_MAX; i++) {
+ len += scnprintf(buf + len, size - len, "%s:", wbm_rel_src[i]);
+
+ for (j = 0; j < ATH12K_MAX_DEVICES; j++) {
+ len += scnprintf(buf + len,
+ size - len,
+ "\t%d:%u", j,
+ device_stats->rx_wbm_rel_source[i][j]);
+ }
+
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ return ret;
+}
+
+static const struct file_operations fops_device_dp_stats = {
+ .read = ath12k_debugfs_dump_device_dp_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
+{
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("device_dp_stats", 0400, ab->debugfs_soc, ab,
+ &fops_device_dp_stats);
+}
+
+void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+ bool dput_needed;
+ char soc_name[64] = { 0 };
+ struct dentry *debugfs_ath12k;
+
+ debugfs_ath12k = debugfs_lookup("ath12k", NULL);
+ if (debugfs_ath12k) {
+ /* a dentry obtained from lookup() needs dput() once we are done with it */
+ dput_needed = true;
+ } else {
+ debugfs_ath12k = debugfs_create_dir("ath12k", NULL);
+ if (IS_ERR_OR_NULL(debugfs_ath12k))
+ return;
+ dput_needed = false;
+ }
+
+ scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k);
+
+ if (dput_needed)
+ dput(debugfs_ath12k);
+}
+
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+ /* We intentionally do not remove the ath12k directory, even if it
+ * would be empty. This simplifies the directory handling, and an
+ * empty ath12k directory left in debugfs is only a minor cosmetic
+ * issue.
+ */
}
void
@@ -1022,10 +1307,6 @@ ath12k_debugfs_fw_stats_process(struct ath12k *ar,
num_bcn = 0;
}
}
- if (stats->stats_id == WMI_REQUEST_PDEV_STAT) {
- list_splice_tail_init(&stats->pdevs, &ar->fw_stats.pdevs);
- ar->fw_stats.fw_stats_done = true;
- }
}
static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
@@ -1052,7 +1333,7 @@ static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
param.vdev_id = 0;
param.stats_id = WMI_REQUEST_VDEV_STAT;
- ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ ret = ath12k_mac_get_fw_stats(ar, &param);
if (ret) {
ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
return ret;
@@ -1117,7 +1398,7 @@ static int ath12k_open_bcn_stats(struct inode *inode, struct file *file)
continue;
param.vdev_id = arvif->vdev_id;
- ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ ret = ath12k_mac_get_fw_stats(ar, &param);
if (ret) {
ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
return ret;
@@ -1184,7 +1465,7 @@ static int ath12k_open_pdev_stats(struct inode *inode, struct file *file)
param.vdev_id = 0;
param.stats_id = WMI_REQUEST_PDEV_STAT;
- ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ ret = ath12k_mac_get_fw_stats(ar, &param);
if (ret) {
ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
return ret;
@@ -1239,11 +1520,7 @@ void ath12k_debugfs_fw_stats_register(struct ath12k *ar)
debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
&fops_pdev_stats);
- INIT_LIST_HEAD(&ar->fw_stats.vdevs);
- INIT_LIST_HEAD(&ar->fw_stats.bcn);
- INIT_LIST_HEAD(&ar->fw_stats.pdevs);
-
- init_completion(&ar->fw_stats_complete);
+ ath12k_fw_stats_init(ar);
}
void ath12k_debugfs_register(struct ath12k *ar)
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index d7041297d5d8..ebef7dace344 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -14,7 +14,9 @@ void ath12k_debugfs_register(struct ath12k *ar);
void ath12k_debugfs_unregister(struct ath12k *ar);
void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
struct ath12k_fw_stats *stats);
-void ath12k_debugfs_fw_stats_reset(struct ath12k *ar);
+void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void ath12k_debugfs_pdev_create(struct ath12k_base *ab);
static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
@@ -129,10 +131,6 @@ static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
{
}
-static inline void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
-{
-}
-
static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
return false;
@@ -142,6 +140,15 @@ static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
{
return 0;
}
+
+static inline void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+}
+
+static inline void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
+{
+}
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index 1c0d5fa39a8d..aeaf970339d4 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -5377,6 +5377,9 @@ static ssize_t ath12k_write_htt_stats_type(struct file *file,
const int size = 32;
int num_args;
+ if (count > size)
+ return -EINVAL;
+
char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 50c36e6ea102..6317c6d4c043 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -168,6 +168,8 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->reo_status[0];
break;
case HAL_RXDMA_MONITOR_STATUS:
+ grp_mask = &ab->hw_params->ring_mask->rx_mon_status[0];
+ break;
case HAL_RXDMA_MONITOR_DST:
grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
break;
@@ -274,12 +276,17 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
break;
case HAL_RXDMA_BUF:
case HAL_RXDMA_MONITOR_BUF:
- case HAL_RXDMA_MONITOR_STATUS:
params.low_threshold = num_entries >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.intr_batch_cntr_thres_entries = 0;
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
case HAL_TX_MONITOR_DST:
params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
@@ -354,7 +361,10 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
/* only valid if idx_lookup_override is not set in tcl_data_cmd */
- bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+ bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+ else
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
HAL_TX_BANK_CONFIG_ADDRX_EN) |
@@ -919,6 +929,25 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
goto done;
}
+ if (ab->hw_params->ring_mask->rx_mon_status[grp_id]) {
+ ring_mask = ab->hw_params->ring_mask->rx_mon_status[grp_id];
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ 0);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
@@ -982,11 +1011,6 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
- if (!ab->mon_reap_timer.function)
- return;
-
- timer_delete_sync(&ab->mon_reap_timer);
-
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
@@ -1024,27 +1048,6 @@ void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
ab->hal_rx_ops->rx_desc_get_desc_size();
}
-static void ath12k_dp_service_mon_ring(struct timer_list *t)
-{
- struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
- int i;
-
- for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
- ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
- ATH12K_DP_RX_MONITOR_MODE);
-
- mod_timer(&ab->mon_reap_timer, jiffies +
- msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
-}
-
-static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
-{
- if (ab->hw_params->rxdma1_enable)
- return;
-
- timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
-}
-
int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -1055,8 +1058,6 @@ int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
if (ret)
goto out;
- ath12k_dp_mon_reap_timer_init(ab);
-
/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -1107,11 +1108,8 @@ static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
switch (arvif->ahvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
- /* TODO: Verify the search type and flags since ast hash
- * is not part of peer mapv3
- */
arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
- arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:
@@ -1206,11 +1204,19 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
if (!skb)
continue;
+ skb_cb = ATH12K_SKB_CB(skb);
+ if (skb_cb->paddr_ext_desc) {
+ dma_unmap_single(ab->dev,
+ skb_cb->paddr_ext_desc,
+ tx_desc_info->skb_ext_desc->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(tx_desc_info->skb_ext_desc);
+ }
+
/* if we are unregistering, hw would've been destroyed and
* ar is no longer valid.
*/
if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
- skb_cb = ATH12K_SKB_CB(skb);
ar = skb_cb->ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
@@ -1261,22 +1267,24 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
if (!ab->hw_params->reoq_lut_support)
return;
- if (dp->reoq_lut.vaddr) {
+ if (dp->reoq_lut.vaddr_unaligned) {
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_REO_REG +
HAL_REO1_QDESC_LUT_BASE0(ab), 0);
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
+ dma_free_coherent(ab->dev, dp->reoq_lut.size,
+ dp->reoq_lut.vaddr_unaligned,
+ dp->reoq_lut.paddr_unaligned);
+ dp->reoq_lut.vaddr_unaligned = NULL;
}
- if (dp->ml_reoq_lut.vaddr) {
+ if (dp->ml_reoq_lut.vaddr_unaligned) {
ath12k_hif_write32(ab,
HAL_SEQ_WCSS_UMAC_REO_REG +
HAL_REO1_QDESC_LUT_BASE1(ab), 0);
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
- dp->ml_reoq_lut.vaddr = NULL;
+ dma_free_coherent(ab->dev, dp->ml_reoq_lut.size,
+ dp->ml_reoq_lut.vaddr_unaligned,
+ dp->ml_reoq_lut.paddr_unaligned);
+ dp->ml_reoq_lut.vaddr_unaligned = NULL;
}
}
@@ -1608,39 +1616,67 @@ free:
return ret;
}
+static int ath12k_dp_alloc_reoq_lut(struct ath12k_base *ab,
+ struct ath12k_reo_q_addr_lut *lut)
+{
+ lut->size = DP_REOQ_LUT_SIZE + HAL_REO_QLUT_ADDR_ALIGN - 1;
+ lut->vaddr_unaligned = dma_alloc_coherent(ab->dev, lut->size,
+ &lut->paddr_unaligned,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lut->vaddr_unaligned)
+ return -ENOMEM;
+
+ lut->vaddr = PTR_ALIGN(lut->vaddr_unaligned, HAL_REO_QLUT_ADDR_ALIGN);
+ lut->paddr = lut->paddr_unaligned +
+ ((unsigned long)lut->vaddr - (unsigned long)lut->vaddr_unaligned);
+ return 0;
+}
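The helper above uses the classic over-allocate-and-align scheme: requesting size + (align - 1) bytes guarantees an aligned address exists inside the allocation, and vaddr/paddr share the same offset from the unaligned base. A standalone sketch of the arithmetic:

static inline unsigned long align_up(unsigned long addr, unsigned long align)
{
        /* align must be a power of two, as HAL_REO_QLUT_ADDR_ALIGN is */
        return (addr + align - 1) & ~(align - 1);
}

/* e.g. align_up(0x1004, 0x100) == 0x1100; applying the same 0xfc
 * offset to the DMA address keeps vaddr and paddr in step.
 */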
+
static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ u32 val;
+ int ret;
if (!ab->hw_params->reoq_lut_support)
return 0;
- dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
- DP_REOQ_LUT_SIZE,
- &dp->reoq_lut.paddr,
- GFP_KERNEL | __GFP_ZERO);
- if (!dp->reoq_lut.vaddr) {
+ ret = ath12k_dp_alloc_reoq_lut(ab, &dp->reoq_lut);
+ if (ret) {
ath12k_warn(ab, "failed to allocate memory for reoq table");
- return -ENOMEM;
+ return ret;
}
- dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
- DP_REOQ_LUT_SIZE,
- &dp->ml_reoq_lut.paddr,
- GFP_KERNEL | __GFP_ZERO);
- if (!dp->ml_reoq_lut.vaddr) {
+ ret = ath12k_dp_alloc_reoq_lut(ab, &dp->ml_reoq_lut);
+ if (ret) {
ath12k_warn(ab, "failed to allocate memory for ML reoq table");
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
- return -ENOMEM;
+ dma_free_coherent(ab->dev, dp->reoq_lut.size,
+ dp->reoq_lut.vaddr_unaligned,
+ dp->reoq_lut.paddr_unaligned);
+ dp->reoq_lut.vaddr_unaligned = NULL;
+ return ret;
}
+ /* The register holds bits [39:8] of the LUT base address, so the
+ * base must be allocated with its low 8 bits zero. The current
+ * design supports a paddr of at most 4 GB, and the shifted value
+ * fits in the 32-bit register.
+ */
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
- dp->reoq_lut.paddr);
+ dp->reoq_lut.paddr >> 8);
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
dp->ml_reoq_lut.paddr >> 8);
+ val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab));
+
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_ADDR(ab),
+ val | HAL_REO_QDESC_ADDR_READ_LUT_ENABLE);
+
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_MAX_PEERID(ab),
+ HAL_REO_QDESC_MAX_PEERID);
+
return 0;
}
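A sketch of the encoding performed by the two register writes above; the helper name is illustrative:

/* a 256-byte-aligned LUT base is programmed as bits [39:8] */
static u32 reoq_lut_base_reg(dma_addr_t paddr)
{
        /* low 8 bits are guaranteed zero by the aligned allocation */
        return (u32)(paddr >> 8);
}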
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 75435a931548..a353333f83b6 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -7,6 +7,7 @@
#ifndef ATH12K_DP_H
#define ATH12K_DP_H
+#include "hal_desc.h"
#include "hal_rx.h"
#include "hw.h"
@@ -69,6 +70,16 @@ struct ath12k_pdev_mon_stats {
u32 dest_mpdu_drop;
u32 dup_mon_linkdesc_cnt;
u32 dup_mon_buf_cnt;
+ u32 dest_mon_stuck;
+ u32 dest_mon_not_reaped;
+};
+
+enum dp_mon_status_buf_state {
+ DP_MON_STATUS_MATCH,
+ DP_MON_STATUS_NO_DMA,
+ DP_MON_STATUS_LAG,
+ DP_MON_STATUS_LEAD,
+ DP_MON_STATUS_REPLINISH,
};
struct dp_link_desc_bank {
@@ -106,6 +117,8 @@ struct dp_mon_mpdu {
struct list_head list;
struct sk_buff *head;
struct sk_buff *tail;
+ u32 err_bitmap;
+ u8 decap_format;
};
#define DP_MON_MAX_STATUS_BUF 32
@@ -118,8 +131,11 @@ struct ath12k_mon_data {
u32 mon_last_buf_cookie;
u64 mon_last_linkdesc_paddr;
u16 chan_noise_floor;
+ u32 err_bitmap;
+ u8 decap_format;
struct ath12k_pdev_mon_stats rx_mon_stats;
+ enum dp_mon_status_buf_state buf_state;
/* lock for monitor data */
spinlock_t mon_lock;
struct sk_buff_head rx_status_q;
@@ -188,6 +204,14 @@ struct ath12k_pdev_dp {
#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
+#define RX_MON_STATUS_BASE_BUF_SIZE 2048
+#define RX_MON_STATUS_BUF_ALIGN 128
+#define RX_MON_STATUS_BUF_RESERVATION 128
+#define RX_MON_STATUS_BUF_SIZE (RX_MON_STATUS_BASE_BUF_SIZE - \
+ (RX_MON_STATUS_BUF_RESERVATION + \
+ RX_MON_STATUS_BUF_ALIGN + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
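Spelling out the macro above: of each 2048-byte status buffer, 128 bytes are reserved, up to 128 more may be consumed aligning skb->data, and room for struct skb_shared_info is kept at the tail, so the DMA-visible payload never overruns the allocation. A compile-time sanity check could read:

static_assert(RX_MON_STATUS_BUF_SIZE <
              RX_MON_STATUS_BASE_BUF_SIZE - RX_MON_STATUS_BUF_RESERVATION);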
#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(19, 18)
@@ -263,6 +287,9 @@ struct ath12k_pdev_dp {
/* Invalid TX Bank ID value */
#define DP_INVALID_BANK_ID -1
+#define MAX_TQM_RELEASE_REASON 15
+#define MAX_FW_TX_STATUS 7
+
struct ath12k_dp_tx_bank_profile {
u8 is_configured;
u32 num_users;
@@ -293,11 +320,18 @@ struct ath12k_rx_desc_info {
struct ath12k_tx_desc_info {
struct list_head list;
struct sk_buff *skb;
+ struct sk_buff *skb_ext_desc;
u32 desc_id; /* Cookie */
u8 mac_id;
u8 pool_id;
};
+struct ath12k_tx_desc_params {
+ struct sk_buff *skb;
+ struct sk_buff *skb_ext_desc;
+ u8 mac_id;
+};
+
struct ath12k_spt_info {
dma_addr_t paddr;
u64 *vaddr;
@@ -309,12 +343,26 @@ struct ath12k_reo_queue_ref {
} __packed;
struct ath12k_reo_q_addr_lut {
- dma_addr_t paddr;
+ u32 *vaddr_unaligned;
u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+struct ath12k_link_stats {
+ u32 tx_enqueued;
+ u32 tx_completed;
+ u32 tx_bcast_mcast;
+ u32 tx_dropped;
+ u32 tx_encap_type[HAL_TCL_ENCAP_TYPE_MAX];
+ u32 tx_encrypt_type[HAL_ENCRYPT_TYPE_MAX];
+ u32 tx_desc_type[HAL_TCL_DESC_TYPE_MAX];
};
struct ath12k_dp {
struct ath12k_base *ab;
+ u32 mon_dest_ring_stuck_cnt;
u8 num_bank_profiles;
/* protects the access and update of bank_profiles */
spinlock_t tx_bank_lock;
@@ -367,6 +415,7 @@ struct ath12k_dp {
struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
+ struct dp_rxdma_mon_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
struct ath12k_reo_q_addr_lut reoq_lut;
struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
@@ -1307,6 +1356,8 @@ struct htt_t2h_version_conf_msg {
#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL GENMASK(31, 16)
#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index d22800e89485..28cadc4167f7 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1416,6 +1416,40 @@ ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_
}
}
+static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
+{
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ *errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ *errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+}
+
+static void
+ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
+ const struct hal_rx_msdu_end *msdu_end)
+{
+ ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2),
+ &pmon->err_bitmap);
+ pmon->decap_format = le32_get_bits(msdu_end->info1,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
static enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
struct ath12k_mon_data *pmon,
@@ -1647,7 +1681,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
}
- break;
+ return HAL_RX_MON_STATUS_MPDU_START;
}
case HAL_RX_MSDU_START:
/* TODO: add msdu start parsing logic */
@@ -1655,6 +1689,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
case HAL_MON_BUF_ADDR:
return HAL_RX_MON_STATUS_BUF_ADDR;
case HAL_RX_MSDU_END:
+ ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data);
return HAL_RX_MON_STATUS_MSDU_END;
case HAL_RX_MPDU_END:
return HAL_RX_MON_STATUS_MPDU_END;
@@ -1688,45 +1723,295 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
}
+static void
+ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ u32 center_freq = ppdu_info->freq;
+
+ rx_status->freq = center_freq;
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(ppdu_info->bw);
+ rx_status->nss = ppdu_info->nss;
+ rx_status->rate_idx = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_6GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_6GHZ;
+ } else if (center_freq >= ATH12K_MIN_2GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_2GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (center_freq >= ATH12K_MIN_5GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_5GHZ_FREQ) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ rx_status->band = NUM_NL80211_BANDS;
+ }
+}
+
+static struct sk_buff
+*ath12k_dp_rx_alloc_mon_status_buf(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int *buf_id)
+{
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
+
+ if (!skb)
+ goto fail_alloc_skb;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ RX_MON_STATUS_BUF_ALIGN)) {
+ skb_pull(skb, PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (*buf_id < 0)
+ goto fail_dma_unmap;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+ return skb;
+
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ return NULL;
+}
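dev_alloc_skb() gives no particular data alignment, hence the skb_pull() above; a worked example of the adjustment (addresses illustrative):

/* if skb->data == 0x...4358, PTR_ALIGN(data, 128) == 0x...4380,
 * so skb_pull() advances data by 0x28 (40) bytes; the DMA mapping
 * that follows therefore starts on a 128-byte boundary.
 */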
+
+static enum dp_mon_status_buf_state
+ath12k_dp_rx_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng,
+ struct dp_rxdma_mon_ring *rx_ring)
+{
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_tlv_64_hdr *tlv;
+ struct sk_buff *skb;
+ void *status_desc;
+ dma_addr_t paddr;
+ u32 cookie;
+ int buf_id;
+ u8 rbm;
+
+ status_desc = ath12k_hal_srng_src_next_peek(ab, srng);
+ if (!status_desc)
+ return DP_MON_STATUS_NO_DMA;
+
+ ath12k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb)
+ return DP_MON_STATUS_NO_DMA;
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_64_hdr *)skb->data;
+ if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) != HAL_RX_STATUS_BUFFER_DONE)
+ return DP_MON_STATUS_NO_DMA;
+
+ return DP_MON_STATUS_REPLINISH;
+}
+
+static u32 ath12k_dp_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id)
+{
+ u32 ret = 0;
+
+ if ((*ppdu_id < msdu_ppdu_id) &&
+ ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ /* Hold on mon dest ring, and reap mon status ring. */
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ } else if ((*ppdu_id > msdu_ppdu_id) &&
+ ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ /* PPDU ID has exceeded the maximum value and will
+ * restart from 0.
+ */
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ }
+ return ret;
+}
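Worked examples for the comparison above; DP_NOT_PPDU_ID_WRAP_AROUND acts as the wrap-detection window, and the concrete values below are illustrative:

/* *ppdu_id = 100,    msdu_ppdu_id = 105: small forward gap
 *   -> status ring catches up, *ppdu_id = 105, returns 105
 * *ppdu_id = 105,    msdu_ppdu_id = 100: dest ring is behind
 *   -> no update, returns 0 (hold the dest ring)
 * *ppdu_id = 0xfff0, msdu_ppdu_id = 4: gap exceeds the window
 *   -> treated as a counter restart, *ppdu_id = 4, returns 4
 */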
+
+static
+void ath12k_dp_mon_next_link_desc_get(struct hal_rx_msdu_link *msdu_link,
+ dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm,
+ struct ath12k_buffer_addr **pp_buf_addr_info)
+{
+ struct ath12k_buffer_addr *buf_addr_info;
+
+ buf_addr_info = &msdu_link->buf_addr_info;
+
+ ath12k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
+
+ *pp_buf_addr_info = buf_addr_info;
+}
+
+static void
+ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 rate_mcs, nss, sgi;
+ bool is_cck;
+
+ pkt_type = ppdu_info->preamble_type;
+ rate_mcs = ppdu_info->rate;
+ nss = ppdu_info->nss;
+ sgi = ppdu_info->gi;
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ if (rx_status->band < NUM_NL80211_BANDS) {
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ }
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11BE:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in EHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_EHT;
+ rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ break;
+ default:
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "monitor receives invalid preamble type %d",
+ pkt_type);
+ break;
+ }
+}
+
static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
struct sk_buff *head_msdu,
struct sk_buff *tail_msdu)
{
- u32 rx_pkt_offset, l2_hdr_offset;
+ u32 rx_pkt_offset, l2_hdr_offset, total_offset;
rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset =
ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data);
- skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset);
+
+ if (ar->ab->hw_params->rxdma1_enable)
+ total_offset = ATH12K_MON_RX_PKT_OFFSET;
+ else
+ total_offset = rx_pkt_offset + l2_hdr_offset;
+
+ skb_pull(head_msdu, total_offset);
}
static struct sk_buff *
ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
- struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
- struct ieee80211_rx_status *rxs, bool *fcs_err)
+ struct dp_mon_mpdu *mon_mpdu,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct ieee80211_rx_status *rxs)
{
struct ath12k_base *ab = ar->ab;
struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
- struct hal_rx_desc *rx_desc, *tail_rx_desc;
- u8 *hdr_desc, *dest, decap_format;
+ struct sk_buff *head_msdu, *tail_msdu;
+ struct hal_rx_desc *rx_desc;
+ u8 *hdr_desc, *dest, decap_format = mon_mpdu->decap_format;
struct ieee80211_hdr_3addr *wh;
- u32 err_bitmap, frag_list_sum_len = 0;
+ struct ieee80211_channel *channel;
+ u32 frag_list_sum_len = 0;
+ u8 channel_num = ppdu_info->chan_num;
mpdu_buf = NULL;
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
- if (!head_msdu)
+ if (!head_msdu || !tail_msdu)
goto err_merge_fail;
- rx_desc = (struct hal_rx_desc *)head_msdu->data;
- tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data;
+ ath12k_dp_mon_fill_rx_stats_info(ar, ppdu_info, rxs);
+
+ if (unlikely(rxs->band == NUM_NL80211_BANDS ||
+ !ath12k_ar_to_hw(ar)->wiphy->bands[rxs->band])) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
+ rxs->band, channel_num, ppdu_info->freq, ar->pdev_idx);
- err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc);
- if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
- *fcs_err = true;
+ spin_lock_bh(&ar->data_lock);
+ channel = ar->rx_channel;
+ if (channel) {
+ rxs->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
- decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc);
+ if (rxs->band < NUM_NL80211_BANDS)
+ rxs->freq = ieee80211_channel_to_frequency(channel_num,
+ rxs->band);
- ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs);
+ ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rxs);
if (decap_format == DP_RX_DECAP_TYPE_RAW) {
ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
@@ -1736,7 +2021,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
head_frag_list = NULL;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
if (!head_frag_list)
head_frag_list = msdu;
@@ -1748,7 +2033,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
prev_buf->next = NULL;
- skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ skb_trim(prev_buf, prev_buf->len);
if (head_frag_list) {
skb_shinfo(head_msdu)->frag_list = head_frag_list;
head_msdu->data_len = frag_list_sum_len;
@@ -1771,7 +2056,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
msdu = head_msdu;
while (msdu) {
- ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
if (qos_pkt) {
dest = skb_push(msdu, sizeof(__le16));
if (!dest)
@@ -1954,7 +2239,8 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ u8 decap)
{
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
@@ -1966,10 +2252,12 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
struct ieee80211_sta *pubsta = NULL;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u8 decap = DP_RX_DECAP_TYPE_RAW;
+ struct ath12k_dp_rx_info rx_info;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
+ status->link_valid = 0;
+
if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
!(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
@@ -1977,10 +2265,9 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
status->flag |= RX_FLAG_RADIOTAP_HE;
}
- if (!(status->flag & RX_FLAG_ONLY_MONITOR))
- decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ rx_info.addr2_present = false;
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, &rx_info);
if (peer && peer->sta) {
pubsta = peer->sta;
if (pubsta->valid_links) {
@@ -2035,25 +2322,23 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
- struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
+ struct dp_mon_mpdu *mon_mpdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct napi_struct *napi)
{
struct ath12k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
struct ieee80211_rx_status *rxs = &dp->rx_status;
- bool fcs_err = false;
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
- mon_skb = ath12k_dp_mon_rx_merg_msdus(ar,
- head_msdu, tail_msdu,
- rxs, &fcs_err);
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mon_mpdu, ppduinfo, rxs);
if (!mon_skb)
goto mon_deliver_fail;
header = mon_skb;
rxs->flag = 0;
- if (fcs_err)
+ if (mon_mpdu->err_bitmap & HAL_RX_MPDU_ERR_FCS)
rxs->flag = RX_FLAG_FAILED_FCS_CRC;
do {
@@ -2070,8 +2355,12 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
+
+ if (!(rxs->flag & RX_FLAG_ONLY_MONITOR))
+ decap = mon_mpdu->decap_format;
+
ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
- ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+ ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs, decap);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
@@ -2079,7 +2368,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
return 0;
mon_deliver_fail:
- mon_skb = head_msdu;
+ mon_skb = mon_mpdu->head;
while (mon_skb) {
skb_next = mon_skb->next;
dev_kfree_skb_any(mon_skb);
@@ -2088,6 +2377,133 @@ mon_deliver_fail:
return -EINVAL;
}
+static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if ((pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ return -ENOMEM;
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+
+ return 0;
+}
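The helper above normalizes an skb to an exact target length in both directions; for example (lengths are illustrative):

/* skb->len = 1800, len = 1500: skb_trim() cuts the tail to 1500.
 * skb->len = 1200, len = 1500, tailroom = 200: 300 bytes are needed
 * but only 200 fit, so pskb_expand_head() grows the tailroom by 100
 * and skb_put() then extends the data area by 300.
 */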
+
+/* The hardware fills buffers in 128-byte aligned blocks, so they
+ * must be reaped with 128-byte alignment as well.
+ */
+#define RXDMA_DATA_DMA_BLOCK_SIZE 128
+
+static void
+ath12k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ *is_frag = true;
+ *frag_len = (RX_MON_STATUS_BASE_BUF_SIZE -
+ sizeof(struct hal_rx_desc)) &
+ ~(RXDMA_DATA_DMA_BLOCK_SIZE - 1);
+ *total_len += *frag_len;
+ } else {
+ if (*is_frag)
+ *frag_len = info->msdu_len - *total_len;
+ else
+ *frag_len = info->msdu_len;
+
+ *msdu_cnt -= 1;
+ }
+}
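/* Worked example for the continuation branch above (the sizes are
 * assumptions for illustration, not values taken from the headers): with
 * RX_MON_STATUS_BASE_BUF_SIZE = 2048 and sizeof(struct hal_rx_desc) = 328,
 *
 *   usable   = 2048 - 328        = 1720
 *   frag_len = 1720 & ~(128 - 1) = 1664
 *
 * so a continued MSDU contributes only whole 128-byte DMA blocks, and the
 * final non-continued fragment carries the remainder
 * (info->msdu_len - total_len).
 */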
+
+static int
+ath12k_dp_mon_parse_status_buf(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ const struct dp_mon_packet_info *packet_info)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
+ struct sk_buff *msdu;
+ int buf_id;
+ u32 offset;
+
+ buf_id = u32_get_bits(packet_info->cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath12k_warn(ab, "mon dest desc with inval buf_id %d\n", buf_id);
+ return 0;
+ }
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ offset = packet_info->dma_length + ATH12K_MON_RX_DOT11_OFFSET;
+ if (ath12k_dp_pkt_set_pktlen(msdu, offset)) {
+ dev_kfree_skb_any(msdu);
+ goto dest_replenish;
+ }
+
+ if (!pmon->mon_mpdu->head)
+ pmon->mon_mpdu->head = msdu;
+ else
+ pmon->mon_mpdu->tail->next = msdu;
+
+ pmon->mon_mpdu->tail = msdu;
+
+dest_replenish:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ enum hal_rx_mon_status hal_status,
+ const void *tlv_data)
+{
+ switch (hal_status) {
+ case HAL_RX_MON_STATUS_MPDU_START:
+ if (WARN_ON_ONCE(pmon->mon_mpdu))
+ break;
+
+ pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC);
+ if (!pmon->mon_mpdu)
+ return -ENOMEM;
+ break;
+ case HAL_RX_MON_STATUS_BUF_ADDR:
+ return ath12k_dp_mon_parse_status_buf(ar, pmon, tlv_data);
+ case HAL_RX_MON_STATUS_MPDU_END:
+ /* If no MSDU was attached, free the empty MPDU */
+ if (pmon->mon_mpdu->tail) {
+ pmon->mon_mpdu->tail->next = NULL;
+ list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+ } else {
+ kfree(pmon->mon_mpdu);
+ }
+ pmon->mon_mpdu = NULL;
+ break;
+ case HAL_RX_MON_STATUS_MSDU_END:
+ pmon->mon_mpdu->decap_format = pmon->decap_format;
+ pmon->mon_mpdu->err_bitmap = pmon->err_bitmap;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
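/* Illustrative TLV sequence (not from the patch): one dp_mon_mpdu is built
 * per MPDU_START..MPDU_END window, with each BUF_ADDR TLV appending one
 * status-ring skb:
 *
 *   MPDU_START          -> kzalloc(mon_mpdu)
 *   BUF_ADDR, BUF_ADDR  -> head -> msdu0 -> msdu1 (tail)
 *   MSDU_END            -> latch decap_format and err_bitmap
 *   MPDU_END            -> list_add_tail() onto dp_rx_mon_mpdu_list,
 *                          or kfree() if no MSDU ever arrived
 */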
+
static enum hal_rx_mon_status
ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
struct sk_buff *skb)
@@ -2114,14 +2530,20 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
+
+ if (ar->monitor_started && ar->ab->hw_params->rxdma1_enable &&
+ ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value))
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+
ptr += sizeof(*tlv) + tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
- if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ if ((ptr - skb->data) > skb->len)
break;
} while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
(hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+ (hal_status == HAL_RX_MON_STATUS_MPDU_START) ||
(hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
(hal_status == HAL_RX_MON_STATUS_MSDU_END));
@@ -2141,23 +2563,21 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct dp_mon_mpdu *tmp;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
- struct sk_buff *head_msdu, *tail_msdu;
- enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+ enum hal_rx_mon_status hal_status;
- ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE)
+ return hal_status;
list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
- head_msdu = mon_mpdu->head;
- tail_msdu = mon_mpdu->tail;
- if (head_msdu && tail_msdu) {
- ath12k_dp_mon_rx_deliver(ar, head_msdu,
- tail_msdu, ppdu_info, napi);
- }
+ if (mon_mpdu->head && mon_mpdu->tail)
+ ath12k_dp_mon_rx_deliver(ar, mon_mpdu, ppdu_info, napi);
kfree(mon_mpdu);
}
+
return hal_status;
}
@@ -2236,6 +2656,94 @@ fail_alloc_skb:
return -ENOMEM;
}
+int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int req_entries)
+{
+ enum hal_rx_buf_return_buf_manager mgr =
+ ab->hw_params->hal_params->rx_buf_rbm;
+ int num_free, num_remain, buf_id;
+ struct ath12k_buffer_addr *desc;
+ struct hal_srng *srng;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ u32 cookie;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(RX_MON_STATUS_BUF_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ RX_MON_STATUS_BUF_ALIGN)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, RX_MON_STATUS_BUF_ALIGN) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+ cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_buf_unassign;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ num_remain--;
+
+ ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_buf_unassign:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
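/* Cookie round-trip used above (illustrative sketch): the IDR buf_id is
 * packed into the ring descriptor on replenish and unpacked on reap, so
 * both sides agree on which skb a completed entry refers to.
 */
static void example_cookie_roundtrip(int buf_id)
{
	u32 cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);

	/* The reap path recovers the original buf_id from the cookie. */
	WARN_ON(u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID) != buf_id);
}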
+
static struct dp_mon_tx_ppdu_info *
ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
unsigned int ppdu_id,
@@ -2838,16 +3346,13 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar,
struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
struct dp_mon_mpdu *tmp, *mon_mpdu;
- struct sk_buff *head_msdu, *tail_msdu;
list_for_each_entry_safe(mon_mpdu, tmp,
&tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
- head_msdu = mon_mpdu->head;
- tail_msdu = mon_mpdu->tail;
- if (head_msdu)
- ath12k_dp_mon_rx_deliver(ar, head_msdu, tail_msdu,
+ if (mon_mpdu->head)
+ ath12k_dp_mon_rx_deliver(ar, mon_mpdu,
&tx_ppdu_info->rx_status, napi);
kfree(mon_mpdu);
@@ -2949,11 +3454,10 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
u32 num_msdu;
- if (!rx_stats)
- return;
-
arsta->rssi_comb = ppdu_info->rssi_comb;
ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+ if (!rx_stats)
+ return;
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@@ -3126,14 +3630,12 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
ahsta = ath12k_sta_to_ahsta(peer->sta);
arsta = &ahsta->deflink;
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
rx_stats = arsta->rx_stats;
-
if (!rx_stats)
return;
- arsta->rssi_comb = ppdu_info->rssi_comb;
- ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
-
num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
user_stats->udp_msdu_count + user_stats->other_msdu_count;
@@ -3346,7 +3848,7 @@ move_next:
ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
while ((skb = __skb_dequeue(&skb_list))) {
- hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ hal_status = ath12k_dp_mon_rx_parse_mon_status(ar, pmon, skb, napi);
if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
ppdu_info->ppdu_continuation = true;
dev_kfree_skb_any(skb);
@@ -3388,6 +3890,487 @@ free_skb:
return num_buffs_reaped;
}
+static int ath12k_dp_rx_reap_mon_status_ring(struct ath12k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ const struct ath12k_hw_hal_params *hal_params;
+ int buf_id, srng_id, num_buffs_reaped = 0;
+ enum dp_mon_status_buf_state reap_status;
+ struct dp_rxdma_mon_ring *rx_ring;
+ struct ath12k_mon_data *pmon;
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_tlv_64_hdr *tlv;
+ void *rx_mon_status_desc;
+ struct hal_srng *srng;
+ struct ath12k_dp *dp;
+ struct sk_buff *skb;
+ struct ath12k *ar;
+ dma_addr_t paddr;
+ u32 cookie;
+ u8 rbm;
+
+ ar = ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
+ dp = &ab->dp;
+ pmon = &ar->dp.mon_data;
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+ rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc = ath12k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc) {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ break;
+ }
+ ath12k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
+ if (paddr) {
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb) {
+ ath12k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_64_hdr *)skb->data;
+ if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ pmon->buf_state = DP_MON_STATUS_NO_DMA;
+ ath12k_warn(ab,
+ "mon status DONE not set %llx, buf_id %d\n",
+ le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG),
+ buf_id);
+ /* The RxDMA status-done bit might not be set even
+ * though the tail pointer has already been moved
+ * by HW.
+ *
+ * If the done status is missing:
+ * 1. As per the MAC team's suggestion, when the
+ * HP + 1 entry is peeked and its DMA is not done
+ * but the HP + 2 entry's DMA done is set, skip
+ * the HP + 1 entry and start processing it in
+ * the next interrupt.
+ * 2. If the HP + 2 entry's DMA done is not set
+ * either, keep polling the HP + 1 entry, i.e.
+ * recheck the same buffer on the next call to
+ * dp_rx_mon_status_srng_process.
+ */
+ reap_status = ath12k_dp_rx_mon_buf_done(ab, srng,
+ rx_ring);
+ if (reap_status == DP_MON_STATUS_NO_DMA)
+ continue;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (ath12k_dp_pkt_set_pktlen(skb, RX_MON_STATUS_BUF_SIZE)) {
+ dev_kfree_skb_any(skb);
+ goto move_next;
+ }
+ __skb_queue_tail(skb_list, skb);
+ } else {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ }
+move_next:
+ skb = ath12k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id);
+
+ if (!skb) {
+ ath12k_warn(ab, "failed to alloc buffer for status ring\n");
+ hal_params = ab->hw_params->hal_params;
+ ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+ hal_params->rx_buf_rbm);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH12K_SKB_RXCB(skb);
+
+ cookie = u32_encode_bits(mac_id, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+ u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ ath12k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie,
+ ab->hw_params->hal_params->rx_buf_rbm);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
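/* Minimal sketch of the done-bit probe used above (an assumption -- the
 * real ath12k_dp_rx_mon_buf_done() is defined elsewhere in this series):
 * peek the entry after the stale one; if its DONE tag is already set, the
 * stale entry can be skipped (REPLINISH), otherwise keep polling it
 * (NO_DMA). ath12k_hal_srng_src_next_peek() is assumed here, modeled on
 * its ath11k counterpart.
 */
static enum dp_mon_status_buf_state
example_mon_buf_done(struct ath12k_base *ab, struct hal_srng *srng,
		     struct dp_rxdma_mon_ring *rx_ring)
{
	struct hal_tlv_64_hdr *tlv;
	struct sk_buff *skb;
	dma_addr_t paddr;
	void *desc;
	u32 cookie;
	int buf_id;
	u8 rbm;

	desc = ath12k_hal_srng_src_next_peek(ab, srng);	/* assumed helper */
	if (!desc)
		return DP_MON_STATUS_NO_DMA;

	ath12k_hal_rx_buf_addr_info_get(desc, &paddr, &cookie, &rbm);
	buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);

	spin_lock_bh(&rx_ring->idr_lock);
	skb = idr_find(&rx_ring->bufs_idr, buf_id);
	spin_unlock_bh(&rx_ring->idr_lock);
	if (!skb)
		return DP_MON_STATUS_NO_DMA;

	/* A full implementation would dma_sync_single_for_cpu() first. */
	tlv = (struct hal_tlv_64_hdr *)skb->data;
	if (le64_get_bits(tlv->tl, HAL_TLV_HDR_TAG) ==
	    HAL_RX_STATUS_BUFFER_DONE)
		return DP_MON_STATUS_REPLINISH;

	return DP_MON_STATUS_NO_DMA;
}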
+
+static u32
+ath12k_dp_rx_mon_mpdu_pop(struct ath12k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu,
+ struct list_head *used_list,
+ u32 *npackets, u32 *ppdu_id)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_buffer_addr *p_buf_addr_info, *p_last_buf_addr_info;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0, total_len = 0, frag_len = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ bool is_frag, is_first_msdu, drop_mpdu = false;
+ struct hal_reo_entrance_ring *ent_desc =
+ (struct hal_reo_entrance_ring *)ring_entry;
+ u32 rx_bufs_used = 0, i = 0, desc_bank = 0;
+ struct hal_rx_desc *rx_desc, *tail_rx_desc;
+ struct hal_rx_msdu_link *msdu_link_desc;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_buffer_addr buf_info;
+ struct hal_rx_msdu_list msdu_list;
+ struct ath12k_skb_rxcb *rxcb;
+ u16 num_msdus = 0;
+ dma_addr_t paddr;
+ u8 rbm;
+
+ ath12k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie,
+ &p_last_buf_addr_info, &rbm,
+ &msdu_cnt);
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ if (le32_get_bits(ent_desc->info1,
+ HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err = le32_get_bits(ent_desc->info1,
+ HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ spin_unlock_bh(&pmon->mon_lock);
+ return rx_bufs_used;
+ }
+
+ desc_bank = u32_get_bits(sw_cookie, DP_LINK_DESC_BANK_MASK);
+ msdu_link_desc =
+ ar->ab->dp.link_desc_banks[desc_bank].vaddr +
+ (paddr - ar->ab->dp.link_desc_banks[desc_bank].paddr);
+
+ ath12k_hal_rx_msdu_list_get(ar, msdu_link_desc, &msdu_list,
+ &num_msdus);
+ desc_info = ath12k_dp_get_rx_desc(ar->ab,
+ msdu_list.sw_cookie[num_msdus - 1]);
+ tail_rx_desc = (struct hal_rx_desc *)(desc_info->skb)->data;
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d last_cookie %d is same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+
+ desc_info =
+ ath12k_dp_get_rx_desc(ar->ab, msdu_list.sw_cookie[i]);
+ msdu = desc_info->skb;
+
+ if (!msdu) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "msdu_pop: invalid msdu (%d/%d)\n",
+ i + 1, num_msdus);
+ goto next_msdu;
+ }
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ if (rxcb->paddr != msdu_list.paddr[i]) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d paddr %lx != %lx\n",
+ i, (unsigned long)rxcb->paddr,
+ (unsigned long)msdu_list.paddr[i]);
+ drop_mpdu = true;
+ continue;
+ }
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, tail_rx_desc);
+ if (is_first_msdu) {
+ if (!ath12k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+ msdu_ppdu_id =
+ ath12k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
+
+ if (ath12k_dp_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id)) {
+ spin_unlock_bh(&pmon->mon_lock);
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath12k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ if (ath12k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
+ dev_kfree_skb_any(msdu);
+ goto next_msdu;
+ }
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, used_list);
+ }
+
+ ath12k_hal_rx_buf_addr_info_set(&buf_info, paddr, sw_cookie, rbm);
+
+ ath12k_dp_mon_next_link_desc_get(msdu_link_desc, &paddr,
+ &sw_cookie, &rbm,
+ &p_buf_addr_info);
+
+ ath12k_dp_rx_link_desc_return(ar->ab, &buf_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+/* Destination ring processing is considered stuck if the destination ring
+ * does not move while the status ring advances by 16 PPDUs. As a
+ * workaround, skip the current destination ring PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
+static void ath12k_dp_rx_mon_dest_process(struct ath12k *ar, int mac_id,
+ u32 quota, struct napi_struct *napi)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_pdev_mon_stats *rx_mon_stats;
+ u32 ppdu_id, rx_bufs_used = 0, ring_id;
+ u32 mpdu_rx_bufs_used, npackets = 0;
+ struct ath12k_dp *dp = &ar->ab->dp;
+ struct ath12k_base *ab = ar->ab;
+ void *ring_entry, *mon_dst_srng;
+ struct dp_mon_mpdu *tmp_mpdu;
+ LIST_HEAD(rx_desc_used_list);
+ struct hal_srng *srng;
+
+ ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+ srng = &ab->hal.srng_list[ring_id];
+
+ mon_dst_srng = &ab->hal.srng_list[ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ while ((ring_entry = ath12k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath12k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+ &head_msdu, &tail_msdu,
+ &rx_desc_used_list,
+ &npackets, &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ spin_lock_bh(&pmon->mon_lock);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ spin_unlock_bh(&pmon->mon_lock);
+ continue;
+ }
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ spin_lock_bh(&pmon->mon_lock);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ spin_unlock_bh(&pmon->mon_lock);
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ break;
+ }
+
+ if (head_msdu && tail_msdu) {
+ tmp_mpdu = kzalloc(sizeof(*tmp_mpdu), GFP_ATOMIC);
+ if (!tmp_mpdu)
+ break;
+
+ tmp_mpdu->head = head_msdu;
+ tmp_mpdu->tail = tail_msdu;
+ tmp_mpdu->err_bitmap = pmon->err_bitmap;
+ tmp_mpdu->decap_format = pmon->decap_format;
+ ath12k_dp_mon_rx_deliver(ar, tmp_mpdu,
+ &pmon->mon_ppdu_info, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ kfree(tmp_mpdu);
+ }
+
+ ring_entry = ath12k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath12k_hal_srng_access_end(ar->ab, mon_dst_srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ ath12k_dp_rx_bufs_replenish(ar->ab,
+ &dp->rx_refill_buf_ring,
+ &rx_desc_used_list,
+ rx_bufs_used);
+ }
+}
+
+static int
+__ath12k_dp_mon_process_ring(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi, int *budget)
+{
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&ar->dp.mon_data;
+ struct ath12k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff_head skb_list;
+ int num_buffs_reaped;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath12k_dp_rx_reap_mon_status_ring(ar->ab, mac_id,
+ budget, &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+
+ if (ar->monitor_started &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ ath12k_dp_rx_mon_dest_process(ar, mac_id, *budget, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+exit:
+ return num_buffs_reaped;
+}
+
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode)
@@ -3398,6 +4381,10 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
if (ab->hw_params->rxdma1_enable) {
if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
+ } else {
+ if (ar->monitor_started)
+ num_buffs_reaped =
+ __ath12k_dp_mon_process_ring(ar, mac_id, napi, &budget);
}
return num_buffs_reaped;
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
index e4368eb42aca..e25595cbdcf3 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.h
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_MON_H
@@ -9,6 +9,9 @@
#include "core.h"
+#define ATH12K_MON_RX_DOT11_OFFSET 5
+#define ATH12K_MON_RX_PKT_OFFSET 8
+
enum dp_monitor_mode {
ATH12K_DP_TX_MONITOR_MODE,
ATH12K_DP_RX_MONITOR_MODE
@@ -82,6 +85,9 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
+int ath12k_dp_mon_status_bufs_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ int req_entries);
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode);
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 75bf4211ad42..7e1a8f29c7b6 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -194,6 +194,22 @@ static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
+u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
+}
+
+bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);
+
+ return tlv_tag == HAL_RX_MPDU_START;
+}
+
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -228,12 +244,6 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
-static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
- struct hal_rx_desc *desc)
-{
- return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
-}
-
static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -420,9 +430,17 @@ static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ int i;
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
+ if (ab->hw_params->rxdma1_enable)
+ return 0;
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
+ ath12k_dp_rxdma_mon_buf_ring_free(ab,
+ &dp->rx_mon_status_refill_ring[i]);
+
return 0;
}
@@ -436,7 +454,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
ath12k_hal_srng_get_entrysize(ab, ringtype);
rx_ring->bufs_max = num_entries;
- ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
+
+ if (ringtype == HAL_RXDMA_MONITOR_STATUS)
+ ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
+ num_entries);
+ else
+ ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
return 0;
}
@@ -457,7 +480,8 @@ static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
- int ret;
+ struct dp_rxdma_mon_ring *mon_ring;
+ int ret, i;
ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
if (ret) {
@@ -470,9 +494,19 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
&dp->rxdma_mon_buf_ring,
HAL_RXDMA_MONITOR_BUF);
- if (ret) {
+ if (ret)
ath12k_warn(ab,
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ mon_ring = &dp->rx_mon_status_refill_ring[i];
+ ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
return ret;
}
}
@@ -556,9 +590,9 @@ void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
spin_lock_bh(&dp->reo_cmd_lock);
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
- dma_unmap_single(ab->dev, cmd->data.paddr,
- cmd->data.size, DMA_BIDIRECTIONAL);
- kfree(cmd->data.vaddr);
+ dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
+ cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.qbuf.vaddr);
kfree(cmd);
}
@@ -566,9 +600,9 @@ void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
- dma_unmap_single(ab->dev, cmd_cache->data.paddr,
- cmd_cache->data.size, DMA_BIDIRECTIONAL);
- kfree(cmd_cache->data.vaddr);
+ dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
+ cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.qbuf.vaddr);
kfree(cmd_cache);
}
spin_unlock_bh(&dp->reo_cmd_lock);
@@ -583,10 +617,10 @@ static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
@@ -641,13 +675,13 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
unsigned long tot_desc_sz, desc_sz;
int ret;
- tot_desc_sz = rx_tid->size;
+ tot_desc_sz = rx_tid->qbuf.size;
desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
while (tot_desc_sz > desc_sz) {
tot_desc_sz -= desc_sz;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE, &cmd,
NULL);
@@ -658,8 +692,8 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
}
memset(&cmd, 0, sizeof(cmd));
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE,
@@ -667,10 +701,10 @@ static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
if (ret) {
ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
}
@@ -729,10 +763,10 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
@@ -762,6 +796,7 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
qref->info1 = u32_encode_bits(upper_32_bits(paddr),
BUFFER_ADDR_INFO1_ADDR) |
u32_encode_bits(tid, DP_REO_QREF_NUM);
+ ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}
static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
@@ -801,8 +836,8 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
return;
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
@@ -810,10 +845,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
if (ret) {
ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
+ rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
+ kfree(rx_tid->qbuf.vaddr);
+ rx_tid->qbuf.vaddr = NULL;
}
if (peer->mlo)
@@ -824,15 +859,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->active = false;
}
-/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
- * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
- * that.
- */
-static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
- struct hal_reo_dest_ring *ring,
- enum hal_wbm_rel_bm_act action)
+int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action)
{
- struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
struct hal_wbm_release_ring *desc;
struct ath12k_dp *dp = &ab->dp;
struct hal_srng *srng;
@@ -850,7 +880,7 @@ static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
goto exit;
}
- ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
+ ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);
exit:
ath12k_hal_srng_access_end(ab, srng);
@@ -863,14 +893,17 @@ exit:
static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
bool rel_link_desc)
{
+ struct ath12k_buffer_addr *buf_addr_info;
struct ath12k_base *ab = rx_tid->ab;
lockdep_assert_held(&ab->base_lock);
if (rx_tid->dst_ring_desc) {
- if (rel_link_desc)
- ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
+ if (rel_link_desc) {
+ buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
+ ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
kfree(rx_tid->dst_ring_desc);
rx_tid->dst_ring_desc = NULL;
}
@@ -909,8 +942,8 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
struct ath12k_hal_reo_cmd cmd = {0};
int ret;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
cmd.ba_window_size = ba_win_sz;
@@ -934,18 +967,67 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
return 0;
}
+static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
+ struct ath12k_sta *ahsta,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u16 ssn, enum hal_pn_type pn_type)
+{
+ u32 ba_win_sz = rx_tid->ba_win_sz;
+ struct ath12k_reoq_buf *buf;
+ void *vaddr, *vaddr_aligned;
+ dma_addr_t paddr_aligned;
+ u8 tid = rx_tid->tid;
+ u32 hw_desc_sz;
+ int ret;
+
+ buf = &ahsta->reoq_bufs[tid];
+ if (!buf->vaddr) {
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+ if (!vaddr)
+ return -ENOMEM;
+
+ vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
+
+ paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(ab->dev, paddr_aligned);
+ if (ret) {
+ kfree(vaddr);
+ return ret;
+ }
+
+ buf->vaddr = vaddr;
+ buf->paddr_aligned = paddr_aligned;
+ buf->size = hw_desc_sz;
+ }
+
+ rx_tid->qbuf = *buf;
+ rx_tid->active = true;
+
+ return 0;
+}
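/* The over-allocate-and-align pattern used above, in isolation
 * (illustrative sketch; 'size' stands for any REO queue descriptor size):
 * allocate align - 1 extra bytes, align the pointer for DMA, and keep the
 * ORIGINAL pointer around for kfree().
 */
static void *example_alloc_aligned(struct ath12k_base *ab, u32 size,
				   void **vaddr_out, dma_addr_t *paddr_out)
{
	void *vaddr = kzalloc(size + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
	void *aligned;

	if (!vaddr)
		return NULL;

	aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
	*paddr_out = dma_map_single(ab->dev, aligned, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab->dev, *paddr_out)) {
		kfree(vaddr);
		return NULL;
	}

	*vaddr_out = vaddr;	/* free this, not the aligned pointer */
	return aligned;
}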
+
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
- struct hal_rx_reo_queue *addr_aligned;
struct ath12k_peer *peer;
+ struct ath12k_sta *ahsta;
struct ath12k_dp_rx_tid *rx_tid;
- u32 hw_desc_sz;
- void *vaddr;
- dma_addr_t paddr;
+ dma_addr_t paddr_aligned;
int ret;
spin_lock_bh(&ab->base_lock);
@@ -957,7 +1039,8 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return -ENOENT;
}
- if (!peer->primary_link) {
+ if (ab->hw_params->dp_primary_link_only &&
+ !peer->primary_link) {
spin_unlock_bh(&ab->base_lock);
return 0;
}
@@ -977,9 +1060,9 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
}
rx_tid = &peer->rx_tid[tid];
+ paddr_aligned = rx_tid->qbuf.paddr_aligned;
/* Update the tid queue if it is already setup */
if (rx_tid->active) {
- paddr = rx_tid->paddr;
ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
ba_win_sz, ssn, true);
spin_unlock_bh(&ab->base_lock);
@@ -991,8 +1074,8 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
if (!ab->hw_params->reoq_lut_support) {
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
peer_mac,
- paddr, tid, 1,
- ba_win_sz);
+ paddr_aligned, tid,
+ 1, ba_win_sz);
if (ret) {
ath12k_warn(ab, "failed to setup peer rx reorder queuefor tid %d: %d\n",
tid, ret);
@@ -1007,61 +1090,34 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
rx_tid->ba_win_sz = ba_win_sz;
- /* TODO: Optimize the memory allocation for qos tid based on
- * the actual BA window size in REO tid update path.
- */
- if (tid == HAL_DESC_REO_NON_QOS_TID)
- hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
- else
- hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
-
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
- if (!vaddr) {
- spin_unlock_bh(&ab->base_lock);
- return -ENOMEM;
- }
-
- addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
- ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
- ssn, pn_type);
-
- paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
- DMA_BIDIRECTIONAL);
-
- ret = dma_mapping_error(ab->dev, paddr);
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
if (ret) {
spin_unlock_bh(&ab->base_lock);
- goto err_mem_free;
+ ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
+ return ret;
}
- rx_tid->vaddr = vaddr;
- rx_tid->paddr = paddr;
- rx_tid->size = hw_desc_sz;
- rx_tid->active = true;
-
if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
if (peer->mlo)
- ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
+ ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
+ paddr_aligned);
else
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
+ paddr_aligned);
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
- paddr, tid, 1, ba_win_sz);
+ paddr_aligned, tid, 1,
+ ba_win_sz);
}
return ret;
-
-err_mem_free:
- kfree(vaddr);
-
- return ret;
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
@@ -1196,8 +1252,8 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
continue;
- cmd.addr_lo = lower_32_bits(rx_tid->paddr);
- cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL);
@@ -1784,8 +1840,12 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
peer_mac_h16, mac_addr);
+ ast_hash = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
+ hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
- peer_id);
+ hw_peer_id);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
@@ -1823,6 +1883,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
+ bool is_continuation;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
@@ -1871,7 +1932,8 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
rem_len = msdu_len - buf_first_len;
while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
rxcb = ATH12K_SKB_RXCB(skb);
- if (rxcb->is_continuation)
+ is_continuation = rxcb->is_continuation;
+ if (is_continuation)
buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
else
buf_len = rem_len;
@@ -1889,7 +1951,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
dev_kfree_skb_any(skb);
rem_len -= buf_len;
- if (!rxcb->is_continuation)
+ if (!is_continuation)
break;
}
@@ -1914,21 +1976,14 @@ static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_
return NULL;
}
-static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ath12k_base *ab = ar->ab;
- bool ip_csum_fail, l4_csum_fail;
-
- ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
- l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
-
- msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
- CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
-static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
- enum hal_encrypt_type enctype)
+int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
@@ -2122,10 +2177,13 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_base *ab = ar->ab;
size_t hdr_len, crypto_len;
- struct ieee80211_hdr *hdr;
- u16 qos_ctl;
- __le16 fc;
- u8 *crypto_hdr;
+ struct ieee80211_hdr hdr;
+ __le16 qos_ctl;
+ u8 *crypto_hdr, mesh_ctrl;
+
+ ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
+ hdr_len = ieee80211_hdrlen(hdr.frame_control);
+ mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
@@ -2133,27 +2191,21 @@ static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
}
- fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
- hdr_len = ieee80211_hdrlen(fc);
skb_push(msdu, hdr_len);
- hdr = (struct ieee80211_hdr *)msdu->data;
- hdr->frame_control = fc;
-
- /* Get wifi header from rx_desc */
- ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
+ memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
if (rxcb->is_mcbc)
status->flag &= ~RX_FLAG_PN_VALIDATED;
/* Add QOS header */
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos_ctl = rxcb->tid;
- if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
- qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;
- /* TODO: Add other QoS ctl fields when required */
- memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
- &qos_ctl, IEEE80211_QOS_CTL_LEN);
+ qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
+ if (mesh_ctrl)
+ qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);
+
+ memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
}
}
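/* Worked example for the QoS block above (hypothetical frame): tid = 5 on
 * a frame with mesh control present yields
 *
 *   qos_ctl = cpu_to_le16(5 | IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT)
 *           = cpu_to_le16(0x0105)
 *
 * copied to ieee80211_get_qos_ctl(hdr), i.e. the two bytes directly after
 * the addressing fields of the rebuilt 802.11 header.
 */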
@@ -2229,10 +2281,10 @@ static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
}
struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct hal_rx_desc *rx_desc = rxcb->rx_desc;
struct ath12k_peer *peer = NULL;
lockdep_assert_held(&ab->base_lock);
@@ -2243,40 +2295,41 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
if (peer)
return peer;
- if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
- return NULL;
+ if (rx_info->addr2_present)
+ peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
- peer = ath12k_peer_find_by_addr(ab,
- ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
- rx_desc));
return peer;
}
static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+ struct ath12k_dp_rx_info *rx_info)
{
- bool fill_crypto_hdr;
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
struct ieee80211_hdr *hdr;
struct ath12k_peer *peer;
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
u32 err_bitmap;
/* PN for multicast packets will be checked in mac80211 */
rxcb = ATH12K_SKB_RXCB(msdu);
- fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
- rxcb->is_mcbc = fill_crypto_hdr;
+ rxcb->is_mcbc = rx_info->is_mcbc;
if (rxcb->is_mcbc)
- rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
+ rxcb->peer_id = rx_info->peer_id;
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info);
if (peer) {
+ /* Reset the mcbc bit for peers that deliver only unicast
+ * frames (ucast_ra_only): mcbc-addressed packets from such
+ * a STA still reach the AP as unicast.
+ */
+ rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only;
+
if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
@@ -2305,7 +2358,7 @@ static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
if (is_decrypted) {
rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
- if (fill_crypto_hdr)
+ if (rx_info->is_mcbc)
rx_status->flag |= RX_FLAG_MIC_STRIPPED |
RX_FLAG_ICV_STRIPPED;
else
@@ -2313,37 +2366,28 @@ static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
RX_FLAG_PN_VALIDATED;
}
- ath12k_dp_rx_h_csum_offload(ar, msdu);
+ ath12k_dp_rx_h_csum_offload(msdu, rx_info);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
enctype, rx_status, is_decrypted);
- if (!is_decrypted || fill_crypto_hdr)
+ if (!is_decrypted || rx_info->is_mcbc)
return;
- if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
- DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
}
-static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_base *ab = ar->ab;
struct ieee80211_supported_band *sband;
- enum rx_msdu_start_pkt_type pkt_type;
- u8 bw;
- u8 rate_mcs, nss;
- u8 sgi;
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
+ enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
+ u8 bw = rx_info->bw, sgi = rx_info->sgi;
+ u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
bool is_cck;
- pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
- bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
- rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
- nss = ath12k_dp_rx_h_nss(ab, rx_desc);
- sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
-
switch (pkt_type) {
case RX_MSDU_START_PKT_TYPE_11A:
case RX_MSDU_START_PKT_TYPE_11B:
@@ -2412,10 +2456,35 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
}
}
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
+ struct ath12k_dp_rx_info *rx_info)
{
- struct ath12k_base *ab = ar->ab;
+ rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc);
+ rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc);
+ rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc);
+ rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+ rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
+ rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
+ rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
+ rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
+ rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc);
+ rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc);
+ rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
+ rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+
+ if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
+ ether_addr_copy(rx_info->addr2,
+ ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc));
+ rx_info->addr2_present = true;
+ }
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(*rx_desc));
+}
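/* Typical call pattern for the new helper (mirroring its use later in this
 * patch): populate ath12k_dp_rx_info once per MSDU, then let the ppdu/mpdu
 * handlers read the cached fields instead of re-querying hal_rx_ops.
 */
static void example_rx_info_flow(struct ath12k *ar, struct hal_rx_desc *desc,
				 struct ieee80211_rx_status *rx_status)
{
	struct ath12k_dp_rx_info rx_info;

	rx_info.addr2_present = false;
	rx_info.rx_status = rx_status;

	ath12k_dp_rx_h_fetch_info(ar->ab, desc, &rx_info);
	ath12k_dp_rx_h_ppdu(ar, &rx_info);
}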
+
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
+{
+ struct ieee80211_rx_status *rx_status = rx_info->rx_status;
u8 channel_num;
u32 center_freq, meta_data;
struct ieee80211_channel *channel;
@@ -2429,12 +2498,12 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
- meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+ meta_data = rx_info->phy_meta_data;
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= ATH12K_MIN_6G_FREQ &&
- center_freq <= ATH12K_MAX_6G_FREQ) {
+ if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
+ center_freq <= ATH12K_MAX_6GHZ_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
@@ -2450,20 +2519,18 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
ieee80211_frequency_to_channel(channel->center_freq);
}
spin_unlock_bh(&ar->data_lock);
- ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
- rx_desc, sizeof(*rx_desc));
}
if (rx_status->band != NL80211_BAND_6GHZ)
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
- ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+ ath12k_dp_rx_h_rate(ar, rx_info);
}
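/* Worked example of the packed phy_meta_data decode above (hypothetical
 * value): meta_data = (5955 << 16) | 1 = 0x17430001 gives
 *
 *   channel_num = 1      (low 16 bits)
 *   center_freq = 5955   (high 16 bits, in MHz)
 *
 * which falls inside the ATH12K_MIN/MAX_6GHZ_FREQ window, so the 6 GHz
 * branch sets rx_status->freq directly from center_freq.
 */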
static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
static const struct ieee80211_radiotap_he known = {
@@ -2476,6 +2543,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
struct ieee80211_sta *pubsta;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status *status = rx_info->rx_status;
u8 decap = DP_RX_DECAP_TYPE_RAW;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol = rxcb->is_eapol;
@@ -2488,10 +2556,10 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
}
if (!(status->flag & RX_FLAG_ONLY_MONITOR))
- decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
+ decap = rx_info->decap_type;
spin_lock_bh(&ab->base_lock);
- peer = ath12k_dp_rx_h_find_peer(ab, msdu);
+ peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info);
pubsta = peer ? peer->sta : NULL;
@@ -2566,7 +2634,7 @@ static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN)))
return true;
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
WARN_ON_ONCE(1);
return false;
}
@@ -2574,7 +2642,7 @@ static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list,
- struct ieee80211_rx_status *rx_status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
@@ -2634,10 +2702,11 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
goto free_out;
}
- ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
- ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
+ ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info);
- rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+ rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
return 0;
@@ -2657,12 +2726,16 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
struct ath12k *ar;
struct ath12k_hw_link *hw_links = ag->hw_links;
struct ath12k_base *partner_ab;
+ struct ath12k_dp_rx_info rx_info;
u8 hw_link_id, pdev_id;
int ret;
if (skb_queue_empty(msdu_list))
return;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = &rx_status;
+
rcu_read_lock();
while ((msdu = __skb_dequeue(msdu_list))) {
@@ -2683,7 +2756,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
continue;
}
- ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+ ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
"Unable to process msdu %d", ret);
@@ -2691,7 +2764,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
continue;
}
- ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}
rcu_read_unlock();
@@ -2724,9 +2797,9 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
struct ath12k_hw_group *ag = ab->ag;
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_link *hw_links = ag->hw_links;
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
@@ -2743,7 +2816,7 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
__skb_queue_head_init(&msdu_list);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
@@ -2804,13 +2877,14 @@ try_again:
DMA_FROM_DEVICE);
num_buffs_reaped[device_id]++;
+ ab->device_stats.reo_rx[ring_id][ab->device_id]++;
push_reason = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[ring_id]++;
+ ab->device_stats.hal_reo_error[ring_id]++;
continue;
}
@@ -2860,7 +2934,7 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -2984,6 +3058,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
struct ieee80211_key_conf *key_conf;
struct ieee80211_hdr *hdr;
+ struct ath12k_dp_rx_info rx_info;
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
@@ -2994,6 +3069,9 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
return 0;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = rxs;
+
hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
@@ -3020,6 +3098,8 @@ mic_fail:
(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
+
rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
skb_pull(msdu, hal_rx_desc_sz);
@@ -3027,7 +3107,7 @@ mic_fail:
if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
return -EINVAL;
- ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath12k_dp_rx_h_ppdu(ar, &rx_info);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
@@ -3242,8 +3322,15 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
- reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
- queue_addr_hi = upper_32_bits(rx_tid->paddr);
+ if (ab->hw_params->reoq_lut_support) {
+ reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
+ queue_addr_hi = 0;
+ } else {
+ reo_ent_ring->queue_addr_lo =
+ cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned));
+ queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ }
+
reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
le32_encode_bits(dst_ind,
@@ -3439,7 +3526,7 @@ static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
goto out_unlock;
}
} else {
- ath12k_dp_rx_link_desc_return(ab, ring_desc,
+ ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
@@ -3552,7 +3639,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
dev_kfree_skb_any(msdu);
- ath12k_dp_rx_link_desc_return(ar->ab, desc,
+ ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
exit:
@@ -3564,9 +3651,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int budget)
{
struct ath12k_hw_group *ag = ab->ag;
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
struct hal_rx_msdu_link *link_desc_va;
@@ -3588,7 +3675,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
tot_n_bufs_reaped = 0;
quota = budget;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
reo_except = &ab->dp.reo_except_ring;
@@ -3602,7 +3689,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
drop = false;
- ab->soc_stats.err_ring_pkts++;
+ ab->device_stats.err_ring_pkts++;
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
@@ -3629,9 +3716,10 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
if (rbm != partner_ab->dp.idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
- ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab,
+ &reo_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_REL_MSDU);
continue;
}
@@ -3649,7 +3737,8 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
drop = true;
/* Return the link desc back to wbm idle list */
- ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
+ ath12k_dp_rx_link_desc_return(partner_ab,
+ &reo_desc->buf_addr_info,
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
@@ -3676,7 +3765,7 @@ exit:
spin_unlock_bh(&srng->lock);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -3716,7 +3805,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
}
static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status,
+ struct ath12k_dp_rx_info *rx_info,
struct sk_buff_head *msdu_list)
{
struct ath12k_base *ab = ar->ab;
@@ -3772,11 +3861,11 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
return -EINVAL;
- ath12k_dp_rx_h_ppdu(ar, desc, status);
+ ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
+ ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);
- ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
-
- rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
+ rxcb->tid = rx_info->tid;
/* Please note that the caller will have access to the msdu and will complete
* rx with mac80211. There is no need to worry about cleaning up amsdu_list.
@@ -3786,17 +3875,17 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
}
static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status,
+ struct ath12k_dp_rx_info *rx_info,
struct sk_buff_head *msdu_list)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
bool drop = false;
- ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+ ar->ab->device_stats.reo_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
- if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
drop = true;
break;
case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
@@ -3817,7 +3906,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
}
static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
u16 msdu_len;
@@ -3831,24 +3920,33 @@ static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "invalid msdu len in tkip mic err %u\n", msdu_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
+ sizeof(*desc));
+ return true;
+ }
+
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
return true;
- ath12k_dp_rx_h_ppdu(ar, desc, status);
+ ath12k_dp_rx_h_ppdu(ar, rx_info);
- status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
- RX_FLAG_DECRYPTED);
+ rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
ath12k_dp_rx_h_undecap(ar, msdu, desc,
- HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
return false;
}
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
- struct ieee80211_rx_status *status)
+ struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
@@ -3856,14 +3954,15 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
bool drop = false;
u32 err_bitmap;
- ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+ ar->ab->device_stats.rxdma_error[rxcb->err_code]++;
switch (rxcb->err_code) {
case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
- drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+ drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
break;
}
fallthrough;
@@ -3885,14 +3984,18 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_rx_status rxs = {0};
+ struct ath12k_dp_rx_info rx_info;
bool drop = true;
+ rx_info.addr2_present = false;
+ rx_info.rx_status = &rxs;
+
switch (rxcb->err_rel_src) {
case HAL_WBM_REL_SRC_MODULE_REO:
- drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
break;
case HAL_WBM_REL_SRC_MODULE_RXDMA:
- drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
break;
default:
/* msdu will get freed */
@@ -3904,13 +4007,13 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
return;
}
- ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
- struct list_head rx_desc_used_list[ATH12K_MAX_SOCS];
+ struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
struct ath12k_hw_group *ag = ab->ag;
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
@@ -3921,9 +4024,10 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- int num_buffs_reaped[ATH12K_MAX_SOCS] = {};
+ int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
int total_num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
struct ath12k_hw_link *hw_links = ag->hw_links;
struct ath12k_base *partner_ab;
u8 hw_link_id, device_id;
@@ -3933,7 +4037,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
__skb_queue_head_init(&msdu_list);
__skb_queue_head_init(&scatter_msdu_list);
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++)
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
@@ -4057,7 +4161,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!total_num_buffs_reaped)
goto done;
- for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) {
+ for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
if (!num_buffs_reaped[device_id])
continue;
@@ -4097,6 +4201,12 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
dev_kfree_skb_any(msdu);
continue;
}
+
+ if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
+ device_id = ar->ab->device_id;
+ device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
+ }
+
ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
@@ -4186,6 +4296,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
void ath12k_dp_rx_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ struct dp_srng *srng;
int i;
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
@@ -4193,6 +4304,10 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+ if (!ab->hw_params->rxdma1_enable) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ath12k_dp_srng_cleanup(ab, srng);
+ }
}
for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
@@ -4341,6 +4456,19 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
ret);
return ret;
}
+ } else {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id =
+ dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to configure mon_status_refill_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
@@ -4355,6 +4483,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
+ struct dp_srng *srng;
int i, ret;
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
@@ -4402,6 +4531,23 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
+ } else {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ret = ath12k_dp_srng_setup(ab, srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, i,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup mon status ring %d\n",
+ i);
+ return ret;
+ }
+ }
}
ret = ath12k_dp_rxdma_buf_setup(ab);
@@ -4472,15 +4618,15 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
return ret;
}
- /* if rxdma1_enable is false, no need to setup
- * rxdma_mon_desc_ring.
- */
- if (!ar->ab->hw_params->rxdma1_enable)
- return 0;
-
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
spin_lock_init(&pmon->mon_lock);
+ if (!ar->ab->hw_params->rxdma1_enable)
+ return 0;
+
+ INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
+ pmon->mon_mpdu = NULL;
+
return 0;
}
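
The dp_rx.c hunks above consistently replace ATH12K_MAX_SOCS with ATH12K_MAX_DEVICES and key the reap bookkeeping by device_id, so buffers reaped on one chip of a multi-device hardware group are replenished on the chip that owns them. A minimal standalone sketch of that two-phase pattern (names and counts are illustrative, not the driver's real structures):

#include <stdio.h>

#define ATH12K_MAX_DEVICES 3

static void replenish(int device_id, int count)
{
	printf("device %d: replenish %d buffers\n", device_id, count);
}

int main(void)
{
	int num_buffs_reaped[ATH12K_MAX_DEVICES] = { 0 };
	int device_id;

	/* reap phase: each buffer is charged to the partner device
	 * (within the hardware group) that owns its rx descriptor */
	num_buffs_reaped[0] = 4;
	num_buffs_reaped[2] = 1;

	/* post-reap phase: only devices that actually contributed
	 * buffers get their refill rings replenished */
	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
		if (!num_buffs_reaped[device_id])
			continue;
		replenish(device_id, num_buffs_reaped[device_id]);
	}
	return 0;
}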
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 88e42365a9d8..e971a314bd2d 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -14,11 +14,9 @@
struct ath12k_dp_rx_tid {
u8 tid;
- u32 *vaddr;
- dma_addr_t paddr;
- u32 size;
u32 ba_win_sz;
bool active;
+ struct ath12k_reoq_buf qbuf;
/* Info related to rx fragments */
u32 cur_sn;
@@ -65,6 +63,24 @@ struct ath12k_dp_rx_rfc1042_hdr {
__be16 snap_type;
} __packed;
+struct ath12k_dp_rx_info {
+ struct ieee80211_rx_status *rx_status;
+ u32 phy_meta_data;
+ u16 peer_id;
+ u8 decap_type;
+ u8 pkt_type;
+ u8 sgi;
+ u8 rate_mcs;
+ u8 bw;
+ u8 nss;
+ u8 addr2[ETH_ALEN];
+ u8 tid;
+ bool ip_csum_fail;
+ bool l4_csum_fail;
+ bool is_mcbc;
+ bool addr2_present;
+};
+
static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
{
u32 ret = 0;
@@ -131,13 +147,13 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc);
struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+ struct ath12k_dp_rx_info *rx_info);
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc);
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc);
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status);
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info);
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
@@ -145,4 +161,17 @@ int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
const void *ptr, void *data),
void *data);
+void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
+ struct ath12k_dp_rx_info *rx_info);
+
+int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype);
+u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc);
+bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc);
+int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buf_addr_info,
+ enum hal_wbm_rel_bm_act action);
#endif /* ATH12K_DP_RX_H */
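
The new struct ath12k_dp_rx_info bundles fields that each helper previously re-parsed from the hal_rx_desc (ath12k_dp_rx_h_ppdu() and friends now take the info struct instead of the raw descriptor). A toy sketch of the fetch-once-then-reuse pattern, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

struct hw_desc { unsigned int raw; };	/* stand-in for hal_rx_desc */

struct rx_info {
	unsigned char tid;
	bool is_mcbc;
};

static void fetch_info(const struct hw_desc *d, struct rx_info *info)
{
	info->tid = d->raw & 0x7;		/* parse the descriptor once... */
	info->is_mcbc = d->raw & 0x8;
}

static void handle_ppdu(const struct rx_info *info)
{
	/* ...and reuse the cached copy everywhere downstream */
	printf("tid %d mcbc %d\n", (int)info->tid, (int)info->is_mcbc);
}

int main(void)
{
	struct hw_desc desc = { .raw = 0xb };
	struct rx_info info;

	fetch_info(&desc, &info);
	handle_ppdu(&info);
	return 0;
}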
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index ced232bf4aed..b6816b6c2c04 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -7,6 +7,7 @@
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
+#include "debugfs.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"
@@ -83,6 +84,7 @@ static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
u8 pool_id)
{
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ tx_desc->skb_ext_desc = NULL;
list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
@@ -219,7 +221,8 @@ out:
}
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb, bool gsn_valid, int mcbc_gsn)
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+ bool is_mcast)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
@@ -229,7 +232,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct hal_tcl_data_cmd *hal_tcl_desc;
struct hal_tx_msdu_ext_desc *msg;
- struct sk_buff *skb_ext_desc;
+ struct sk_buff *skb_ext_desc = NULL;
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ath12k_vif *ahvif = arvif->ahvif;
@@ -347,7 +350,7 @@ tcl_ring_sel:
default:
/* TODO: Take care of other encap modes as well */
ret = -EINVAL;
- atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ atomic_inc(&ab->device_stats.tx_err.misc_fail);
goto fail_remove_tx_buf;
}
@@ -370,7 +373,7 @@ tcl_ring_sel:
map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
- atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ atomic_inc(&ab->device_stats.tx_err.misc_fail);
ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
ret = -ENOMEM;
goto fail_remove_tx_buf;
@@ -415,23 +418,21 @@ map:
if (ret < 0) {
ath12k_dbg(ab, ATH12K_DBG_DP_TX,
"Failed to add HTT meta data, dropping packet\n");
- kfree_skb(skb_ext_desc);
- goto fail_unmap_dma;
+ goto fail_free_ext_skb;
}
}
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr);
- if (ret) {
- kfree_skb(skb_ext_desc);
- goto fail_unmap_dma;
- }
+ if (ret)
+ goto fail_free_ext_skb;
ti.data_len = skb_ext_desc->len;
ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
skb_cb->paddr_ext_desc = ti.paddr;
+ tx_desc->skb_ext_desc = skb_ext_desc;
}
hal_ring_id = tx_ring->tcl_data_ring.ring_id;
@@ -447,7 +448,7 @@ map:
* desc because the desc is directly enqueued onto hw queue.
*/
ath12k_hal_srng_access_end(ab, tcl_ring);
- ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+ ab->device_stats.tx_err.desc_na[ti.ring_id]++;
spin_unlock_bh(&tcl_ring->lock);
ret = -ENOMEM;
@@ -462,9 +463,22 @@ map:
ring_selector++;
}
- goto fail_unmap_dma;
+ goto fail_unmap_dma_ext;
}
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_encap_type[ti.encap_type]++;
+ arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
+ arvif->link_stats.tx_desc_type[ti.type]++;
+
+ if (is_mcast)
+ arvif->link_stats.tx_bcast_mcast++;
+ else
+ arvif->link_stats.tx_enqueued++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
+ ab->device_stats.tx_enqueued[ti.ring_id]++;
+
ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
ath12k_hal_srng_access_end(ab, tcl_ring);
@@ -478,16 +492,24 @@ map:
return 0;
-fail_unmap_dma:
- dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
-
+fail_unmap_dma_ext:
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc),
+ skb_ext_desc->len,
DMA_TO_DEVICE);
+fail_free_ext_skb:
+ kfree_skb(skb_ext_desc);
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
fail_remove_tx_buf:
ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_dropped++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+
if (tcl_ring_retry)
goto tcl_ring_sel;
@@ -495,20 +517,23 @@ fail_remove_tx_buf:
}
static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
- struct sk_buff *msdu, u8 mac_id,
- struct dp_tx_ring *tx_ring)
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params)
{
struct ath12k *ar;
+ struct sk_buff *msdu = desc_params->skb;
struct ath12k_skb_cb *skb_cb;
- u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params->mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
ar = ab->pdevs[pdev_id].ar;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
ieee80211_free_txskb(ar->ah->hw, msdu);
@@ -518,26 +543,46 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
- struct sk_buff *msdu,
+ struct ath12k_tx_desc_params *desc_params,
struct dp_tx_ring *tx_ring,
struct ath12k_dp_htt_wbm_tx_status *ts)
{
struct ieee80211_tx_info *info;
+ struct ath12k_link_vif *arvif;
struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
struct ath12k *ar;
+ struct sk_buff *msdu = desc_params->skb;
skb_cb = ATH12K_SKB_CB(msdu);
info = IEEE80211_SKB_CB(msdu);
ar = skb_cb->ar;
+ ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
+
+ vif = skb_cb->vif;
+ if (vif) {
+ ahvif = ath12k_vif_to_ahvif(vif);
+ rcu_read_lock();
+ arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+ if (arvif) {
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_completed++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+ }
+ rcu_read_unlock();
+ }
memset(&info->status, 0, sizeof(info->status));
@@ -560,10 +605,9 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
}
static void
-ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
- void *desc, u8 mac_id,
- struct sk_buff *msdu,
- struct dp_tx_ring *tx_ring)
+ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_tx_desc_params *desc_params)
{
struct htt_tx_wbm_completion *status_desc;
struct ath12k_dp_htt_wbm_tx_status ts = {0};
@@ -573,19 +617,21 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
wbm_status = le32_get_bits(status_desc->info0,
HTT_TX_WBM_COMP_INFO0_STATUS);
+ ab->device_stats.fw_tx_status[wbm_status]++;
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
- ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
+ ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
- ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
+ ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
/* This event is to be handled only when the driver decides to
@@ -593,7 +639,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
*/
break;
default:
- ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
break;
}
}
@@ -717,13 +763,18 @@ static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status
}
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts)
+ struct ath12k_tx_desc_params *desc_params,
+ struct hal_tx_status *ts,
+ int ring)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_hw *ah = ar->ah;
struct ieee80211_tx_info *info;
+ struct ath12k_link_vif *arvif;
struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_vif *vif;
+ struct ath12k_vif *ahvif;
+ struct sk_buff *msdu = desc_params->skb;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@@ -731,11 +782,14 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
}
skb_cb = ATH12K_SKB_CB(msdu);
+ ab->device_stats.tx_completed[ring]++;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
- if (skb_cb->paddr_ext_desc)
+ if (skb_cb->paddr_ext_desc) {
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
- sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+ desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(desc_params->skb_ext_desc);
+ }
rcu_read_lock();
@@ -749,6 +803,17 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
goto exit;
}
+ vif = skb_cb->vif;
+ if (vif) {
+ ahvif = ath12k_vif_to_ahvif(vif);
+ arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+ if (arvif) {
+ spin_lock_bh(&arvif->link_stats_lock);
+ arvif->link_stats.tx_completed++;
+ spin_unlock_bh(&arvif->link_stats_lock);
+ }
+ }
+
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
@@ -842,12 +907,14 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct ath12k_tx_desc_info *tx_desc = NULL;
- struct sk_buff *msdu;
struct hal_tx_status ts = { 0 };
+ struct ath12k_tx_desc_params desc_params;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
struct hal_wbm_release_ring *desc;
- u8 mac_id, pdev_id;
+ u8 pdev_id;
u64 desc_va;
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason rel_status;
spin_lock_bh(&status_ring->lock);
@@ -900,28 +967,37 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
continue;
}
- msdu = tx_desc->skb;
- mac_id = tx_desc->mac_id;
+ desc_params.mac_id = tx_desc->mac_id;
+ desc_params.skb = tx_desc->skb;
+ desc_params.skb_ext_desc = tx_desc->skb_ext_desc;
+
+ /* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
+ buf_rel_source = le32_get_bits(tx_status->info0,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
+ ab->device_stats.tx_wbm_rel_source[buf_rel_source]++;
+
+ rel_status = le32_get_bits(tx_status->info0,
+ HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
+ ab->device_stats.tqm_rel_reason[rel_status]++;
/* Release the descriptor as soon as the necessary info is extracted
* to reduce contention
*/
ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
- ath12k_dp_tx_process_htt_tx_complete(ab,
- (void *)tx_status,
- mac_id, msdu,
- tx_ring);
+ ath12k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status,
+ tx_ring, &desc_params);
continue;
}
- pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params.mac_id);
ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
- ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
+ ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts,
+ tx_ring->tcl_data_ring_id);
}
}
@@ -1431,6 +1507,11 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+
+ if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
}
if (ab->hw_params->rxdma1_enable) {
@@ -1448,6 +1529,44 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
return ret;
}
}
+ return 0;
+ }
+
+ if (!reset) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon rx buf %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ if (!reset) {
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ }
+
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ i,
+ HAL_RXDMA_MONITOR_STATUS,
+ RX_MON_STATUS_BUF_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for mon status buf %d\n",
+ ret);
+ return ret;
+ }
}
return 0;
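
The reordered error labels in ath12k_dp_tx() are the classic goto-unwind idiom: each label undoes exactly one completed step, in reverse order, so a failure while handling the optional extension skb no longer leaks the mapping or frees the skb twice. A compilable toy model of the control flow (all names are stand-ins for the driver's real helpers):

#include <stdio.h>
#include <stdlib.h>

static int map(const char *what) { printf("map %s\n", what); return 0; }
static void unmap(const char *what) { printf("unmap %s\n", what); }

int tx(int fail_at)
{
	char *ext = NULL;

	if (map("data"))
		goto fail_remove_tx_buf;

	ext = malloc(16);			/* optional extension buffer */
	if (!ext)
		goto fail_unmap_dma;
	if (fail_at == 1 || map("ext"))		/* mapping the ext buffer failed */
		goto fail_free_ext_skb;

	if (fail_at == 2)			/* ring enqueue failed */
		goto fail_unmap_dma_ext;

	free(ext);
	return 0;

fail_unmap_dma_ext:
	unmap("ext");
fail_free_ext_skb:
	free(ext);				/* free(NULL) is a no-op, like kfree_skb(NULL) */
fail_unmap_dma:
	unmap("data");
fail_remove_tx_buf:
	return -1;
}

int main(void)
{
	tx(2);
	return 0;
}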
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index a5904097dc34..10acdcf1fa8f 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -17,7 +17,8 @@ struct ath12k_dp_htt_wbm_tx_status {
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb, bool gsn_valid, int mcbc_gsn);
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+ bool is_mcast);
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
diff --git a/drivers/net/wireless/ath/ath12k/fw.c b/drivers/net/wireless/ath/ath12k/fw.c
index 5be4b2d4a19d..5ac497f80cad 100644
--- a/drivers/net/wireless/ath/ath12k/fw.c
+++ b/drivers/net/wireless/ath/ath12k/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -99,6 +99,8 @@ static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab,
__set_bit(i, ab->fw.fw_features);
}
+ ab->fw.fw_features_valid = true;
+
ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "features", "",
ab->fw.fw_features,
sizeof(ab->fw.fw_features));
@@ -169,3 +171,8 @@ void ath12k_fw_unmap(struct ath12k_base *ab)
release_firmware(ab->fw.fw);
memset(&ab->fw, 0, sizeof(ab->fw));
}
+
+bool ath12k_fw_feature_supported(struct ath12k_base *ab, enum ath12k_fw_features feat)
+{
+ return ab->fw.fw_features_valid && test_bit(feat, ab->fw.fw_features);
+}
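
ath12k_fw_feature_supported() pairs the feature bitmap with a fw_features_valid flag so a bit is only honoured once a firmware feature TLV was actually parsed. A minimal sketch of the same guard, assuming a plain bitmap:

#include <stdbool.h>
#include <stdio.h>

struct fw {
	bool features_valid;
	unsigned long features;	/* one bit per feature */
};

static bool feature_supported(const struct fw *fw, int feat)
{
	return fw->features_valid && (fw->features & (1UL << feat));
}

int main(void)
{
	struct fw fw = { .features = 1UL << 3 };	/* bit set, not yet valid */

	printf("%d\n", (int)feature_supported(&fw, 3));	/* 0: TLV not parsed */
	fw.features_valid = true;
	printf("%d\n", (int)feature_supported(&fw, 3));	/* 1 */
	return 0;
}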
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
index 273c003eff3b..7afaefed5086 100644
--- a/drivers/net/wireless/ath/ath12k/fw.h
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_FW_H
@@ -32,5 +32,6 @@ enum ath12k_fw_features {
void ath12k_fw_map(struct ath12k_base *ab);
void ath12k_fw_unmap(struct ath12k_base *ab);
+bool ath12k_fw_feature_supported(struct ath12k_base *ab, enum ath12k_fw_features feat);
#endif /* ATH12K_FW_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index cd59ff8e6c7b..a301898e5849 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -154,7 +154,14 @@ static const struct hal_srng_config hw_srng_config_template[] = {
.ring_dir = HAL_SRNG_DIR_SRC,
.max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
},
- [HAL_RXDMA_MONITOR_STATUS] = { 0, },
+ [HAL_RXDMA_MONITOR_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
[HAL_RXDMA_MONITOR_DESC] = { 0, },
[HAL_RXDMA_DIR_BUF] = {
.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
@@ -449,8 +456,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
- RX_MPDU_START_INFO6_MCAST_BCAST;
+ return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
+ RX_MSDU_END_INFO5_DA_IS_MCBC;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -511,11 +518,6 @@ static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
}
-static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl);
-}
-
static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
@@ -552,9 +554,9 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
s = &hal->srng_config[HAL_TCL_DATA];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
- s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
s = &hal->srng_config[HAL_TCL_CMD];
@@ -566,29 +568,29 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
s = &hal->srng_config[HAL_CE_SRC];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s = &hal->srng_config[HAL_CE_DST];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_CE_DST_STATUS];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
HAL_CE_DST_STATUS_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_WBM_IDLE_LINK];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
@@ -736,7 +738,6 @@ const struct hal_rx_ops hal_rx_qcn9274_ops = {
.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
- .rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
@@ -908,8 +909,8 @@ static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc
static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
- RX_MPDU_START_INFO6_MCAST_BCAST;
+ return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) &
+ RX_MSDU_END_INFO5_DA_IS_MCBC;
}
static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -975,11 +976,6 @@ ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
}
-static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
-}
-
static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
@@ -1080,8 +1076,6 @@ const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
- .rx_desc_get_mpdu_frame_ctl =
- ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
@@ -1330,11 +1324,6 @@ static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
}
-static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
- return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl);
-}
-
static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
{
struct ath12k_hal *hal = &ab->hal;
@@ -1371,9 +1360,9 @@ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
s = &hal->srng_config[HAL_TCL_DATA];
s->max_rings = 5;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
- s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
s = &hal->srng_config[HAL_TCL_CMD];
@@ -1386,31 +1375,31 @@ static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
s = &hal->srng_config[HAL_CE_SRC];
s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
- HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s = &hal->srng_config[HAL_CE_DST];
s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_CE_DST_STATUS];
s->max_rings = 12;
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
HAL_CE_DST_STATUS_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
- s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
- s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
- HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s = &hal->srng_config[HAL_WBM_IDLE_LINK];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
@@ -1555,7 +1544,6 @@ const struct hal_rx_ops hal_rx_wcn7850_ops = {
.rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc,
.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
- .rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
@@ -1756,7 +1744,7 @@ static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
u32_encode_bits((srng->entry_size * srng->num_entries),
HAL_TCL1_RING_BASE_MSB_RING_SIZE);
- ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
@@ -1962,7 +1950,7 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
{
u32 len;
- len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+ len = le32_get_bits(READ_ONCE(desc->flags), HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
return len;
@@ -2054,6 +2042,24 @@ int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
+void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_hp;
+
+ return desc;
+}
+
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng)
{
@@ -2087,6 +2093,17 @@ void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
return desc;
}
+void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
+ srng->u.src_ring.cached_tp)
+ return NULL;
+
+ return srng->ring_base_vaddr + srng->u.src_ring.hp;
+}
+
void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
struct hal_srng *srng)
{
@@ -2132,7 +2149,7 @@ void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
else
- srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
}
/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
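
The new ath12k_hal_srng_src_peek()/ath12k_hal_srng_src_next_peek() return the entry at (or just past) the head pointer without advancing it, refusing when the advance would collide with the cached tail. A standalone model of that ring arithmetic (values illustrative; the real ring lives in DMA-coherent memory, hence the READ_ONCE() on the cached head pointer above):

#include <stdio.h>

struct srng {
	unsigned int hp, cached_tp;	/* offsets in 32-bit words */
	unsigned int entry_size, ring_size;
	unsigned int *base;
};

static unsigned int *src_peek(struct srng *s)
{
	/* advancing hp by one entry would hit the tail: ring is full */
	if ((s->hp + s->entry_size) % s->ring_size == s->cached_tp)
		return NULL;
	return s->base + s->hp;
}

static unsigned int *src_next_peek(struct srng *s)
{
	unsigned int next_hp = (s->hp + s->entry_size) % s->ring_size;

	if (next_hp == s->cached_tp)
		return NULL;
	return s->base + next_hp;	/* slot after the current head */
}

int main(void)
{
	unsigned int ring[8] = { 0 };
	struct srng s = { .hp = 2, .cached_tp = 6,
			  .entry_size = 2, .ring_size = 8, .base = ring };

	printf("peek %p next %p\n", (void *)src_peek(&s),
	       (void *)src_next_peek(&s));
	return 0;
}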
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 94e2e8735958..0ee9c6b26dab 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_H
@@ -11,6 +11,7 @@
#include "rx_desc.h"
struct ath12k_base;
+#define HAL_CE_REMAP_REG_BASE (ab->ce_remap_base_addr)
#define HAL_LINK_DESC_SIZE (32 << 2)
#define HAL_LINK_DESC_ALIGN 128
@@ -21,6 +22,7 @@ struct ath12k_base;
#define HAL_MAX_AVAIL_BLK_RES 3
#define HAL_RING_BASE_ALIGN 8
+#define HAL_REO_QLUT_ADDR_ALIGN 256
#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
/* TODO: Check with hw team on the supported scatter buf size */
@@ -39,15 +41,21 @@ struct ath12k_base;
#define HAL_OFFSET_FROM_HP_TO_TP 4
#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
+#define HAL_REO_QDESC_MAX_PEERID 8191
/* WCSS Relative address */
+#define HAL_SEQ_WCSS_CMEM_OFFSET 0x00100000
#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
-#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x01b80000
-#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x01b81000
-#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x01b82000
-#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x01b83000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \
+ ((ab)->hw_params->regs->hal_umac_ce0_src_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \
+ ((ab)->hw_params->regs->hal_umac_ce0_dest_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \
+ ((ab)->hw_params->regs->hal_umac_ce1_src_reg_base)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \
+ ((ab)->hw_params->regs->hal_umac_ce1_dest_reg_base)
#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
@@ -57,8 +65,10 @@ struct ath12k_base;
/* SW2TCL(x) R0 ring configuration address */
#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020
#define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240
-#define HAL_TCL1_RING_BASE_LSB 0x00000900
-#define HAL_TCL1_RING_BASE_MSB 0x00000904
+#define HAL_TCL1_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_base_lsb)
+#define HAL_TCL1_RING_BASE_MSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_base_msb)
#define HAL_TCL1_RING_ID(ab) ((ab)->hw_params->regs->hal_tcl1_ring_id)
#define HAL_TCL1_RING_MISC(ab) \
((ab)->hw_params->regs->hal_tcl1_ring_misc)
@@ -76,30 +86,31 @@ struct ath12k_base;
((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb)
#define HAL_TCL1_RING_MSI1_DATA(ab) \
((ab)->hw_params->regs->hal_tcl1_ring_msi1_data)
-#define HAL_TCL2_RING_BASE_LSB 0x00000978
+#define HAL_TCL2_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl2_ring_base_lsb)
#define HAL_TCL_RING_BASE_LSB(ab) \
((ab)->hw_params->regs->hal_tcl_ring_base_lsb)
-#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \
- (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \
- (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \
- (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_BASE_MSB_OFFSET \
- (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_ID_OFFSET(ab) \
- (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
- (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
- (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MISC_OFFSET(ab) \
- (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_MSI1_DATA(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_BASE_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_ID_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_ID(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_TP_ADDR_LSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_TP_ADDR_MSB(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
+#define HAL_TCL1_RING_MISC_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
+ (HAL_TCL1_RING_MISC(_ab) - HAL_TCL1_RING_BASE_LSB(_ab)); })
/* SW2TCL(x) R2 ring pointers (head/tail) address */
#define HAL_TCL1_RING_HP 0x00002000
@@ -132,6 +143,8 @@ struct ath12k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_QDESC_ADDR(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_addr)
+#define HAL_REO1_QDESC_MAX_PEERID(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_max_peerid)
#define HAL_REO1_SW_COOKIE_CFG0(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg0)
#define HAL_REO1_SW_COOKIE_CFG1(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg1)
#define HAL_REO1_QDESC_LUT_BASE0(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base0)
@@ -319,6 +332,8 @@ struct ath12k_base;
#define HAL_REO1_SW_COOKIE_CFG_ALIGN BIT(18)
#define HAL_REO1_SW_COOKIE_CFG_ENABLE BIT(19)
#define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE BIT(20)
+#define HAL_REO_QDESC_ADDR_READ_LUT_ENABLE BIT(7)
+#define HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY BIT(6)
/* CE ring bit field mask and shift */
#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
@@ -365,6 +380,9 @@ struct ath12k_base;
* ath12k_hal_rx_desc_get_err().
*/
+#define HAL_IPQ5332_CE_WFSS_REG_BASE 0x740000
+#define HAL_IPQ5332_CE_SIZE 0x100000
+
enum hal_srng_ring_id {
HAL_SRNG_RING_ID_REO2SW0 = 0,
HAL_SRNG_RING_ID_REO2SW1,
@@ -480,6 +498,7 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0 = HAL_SRNG_RING_ID_PMAC1_ID_START,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
@@ -1068,7 +1087,6 @@ struct hal_rx_ops {
bool (*rx_desc_is_da_mcbc)(struct hal_rx_desc *desc);
void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr);
- u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc);
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype);
@@ -1126,6 +1144,7 @@ void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
struct hal_srng_params *params);
void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng);
+void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng);
void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng);
int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr);
@@ -1133,6 +1152,8 @@ void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
struct hal_srng *srng);
void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
struct hal_srng *srng);
+void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab,
+ struct hal_srng *srng);
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
struct hal_srng *srng);
int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
@@ -1154,4 +1175,5 @@ int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
void ath12k_hal_srng_shadow_config(struct ath12k_base *ab);
void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
struct hal_srng *srng);
+void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab);
#endif
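
The offset macros in hal.h were rewritten as GNU C statement expressions so the ab argument is evaluated exactly once, even though each offset now reads two per-chip register fields. A small demonstration of why that matters when the argument has side effects (toy register layout, compiles with gcc):

#include <stdio.h>

struct regs { int ring_id, ring_base_lsb; };

#define RING_ID(ab)	((ab)->ring_id)
#define RING_BASE(ab)	((ab)->ring_base_lsb)

/* evaluate ab once into _ab, then use it for both register reads */
#define RING_ID_OFFSET(ab) ({ typeof(ab) _ab = (ab); \
	RING_ID(_ab) - RING_BASE(_ab); })

static struct regs chips[2] = { { 0x908, 0x900 }, { 0xa08, 0xa00 } };
static int calls;

static struct regs *next_chip(void)
{
	calls++;
	return &chips[0];
}

int main(void)
{
	/* next_chip() runs exactly once, not twice */
	printf("offset 0x%x, calls %d\n", RING_ID_OFFSET(next_chip()), calls);
	return 0;
}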
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 3e8983b85de8..0173f731bfef 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -707,7 +707,7 @@ enum hal_rx_msdu_desc_reo_dest_ind {
#define RX_MSDU_DESC_INFO0_DECAP_FORMAT GENMASK(30, 29)
#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
- (u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
+ (le32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
struct rx_msdu_desc {
__le32 info0;
@@ -1008,6 +1008,10 @@ enum hal_reo_entr_rxdma_ecode {
HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MULTICAST_ECHO_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_MISMATCH_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNAUTH_WDS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_GRPCAST_AMSDU_WDS_ERR,
HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
};
@@ -1284,11 +1288,13 @@ enum hal_tcl_encap_type {
HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
HAL_TCL_ENCAP_TYPE_ETHERNET,
HAL_TCL_ENCAP_TYPE_802_3 = 3,
+ HAL_TCL_ENCAP_TYPE_MAX
};
enum hal_tcl_desc_type {
HAL_TCL_DESC_TYPE_BUFFER,
HAL_TCL_DESC_TYPE_EXT_DESC,
+ HAL_TCL_DESC_TYPE_MAX,
};
enum hal_wbm_htt_tx_comp_status {
@@ -1298,6 +1304,7 @@ enum hal_wbm_htt_tx_comp_status {
HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH,
HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
};
@@ -1806,6 +1813,7 @@ enum hal_wbm_rel_src_module {
HAL_WBM_REL_SRC_MODULE_REO,
HAL_WBM_REL_SRC_MODULE_FW,
HAL_WBM_REL_SRC_MODULE_SW,
+ HAL_WBM_REL_SRC_MODULE_MAX,
};
enum hal_wbm_rel_desc_type {
@@ -1998,6 +2006,7 @@ struct hal_wbm_release_ring_cc_rx {
#define HAL_WBM_RELEASE_INFO3_CONTINUATION BIT(2)
#define HAL_WBM_RELEASE_INFO5_LOOPING_COUNT GENMASK(31, 28)
+#define HAL_ENCRYPT_TYPE_MAX 12
struct hal_wbm_release_ring {
struct ath12k_buffer_addr buf_addr_info;
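
HAL_RX_MSDU_PKT_LENGTH_GET switching from u32_get_bits() to le32_get_bits() matters on big-endian hosts: info0 is a __le32, so the field must be byte-swapped before masking. A host-endianness-independent sketch using plain C stand-ins for le32_to_cpu() and the field mask:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSDU_LENGTH_MASK 0x3fffu	/* illustrative GENMASK(13, 0) */

static uint32_t le32_to_cpu_sketch(uint32_t le)
{
	const uint8_t *b = (const uint8_t *)&le;

	/* reassemble from the little-endian byte order, whatever the host is */
	return (uint32_t)b[0] | b[1] << 8 | b[2] << 16 | (uint32_t)b[3] << 24;
}

static uint32_t msdu_len(uint32_t le_info0)
{
	return le32_to_cpu_sketch(le_info0) & MSDU_LENGTH_MASK;
}

int main(void)
{
	uint32_t info0;
	uint8_t raw[4] = { 0x34, 0x12, 0x00, 0x00 };	/* LE encoding of 0x1234 */

	memcpy(&info0, raw, sizeof(info0));
	printf("len 0x%x\n", msdu_len(info0));		/* 0x1234 on any host */
	return 0;
}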
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index ac17d6223fa7..48aa48c48606 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -326,7 +326,7 @@ int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
HAL_REO_DEST_RING_INFO0_PUSH_REASON);
err_code = le32_get_bits(desc->info0,
HAL_REO_DEST_RING_INFO0_ERROR_CODE);
- ab->soc_stats.reo_error[err_code]++;
+ ab->device_stats.reo_error[err_code]++;
if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
@@ -381,7 +381,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
val = le32_get_bits(wbm_desc->buf_addr_info.info1,
BUFFER_ADDR_INFO1_RET_BUF_MGR);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
return -EINVAL;
}
@@ -393,7 +393,7 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
val = le32_get_bits(wbm_cc_desc->info0,
HAL_WBM_RELEASE_RX_CC_INFO0_RBM);
if (val != HAL_RX_BUF_RBM_SW3_BM) {
- ab->soc_stats.invalid_rbm++;
+ ab->device_stats.invalid_rbm++;
return -EINVAL;
}
@@ -446,17 +446,97 @@ void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
*cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
}
+void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt)
+{
+ struct hal_reo_entrance_ring *reo_ent_ring =
+ (struct hal_reo_entrance_ring *)rx_desc;
+ struct ath12k_buffer_addr *buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details =
+ (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
+
+ *msdu_cnt = le32_get_bits(rx_mpdu_desc_info_details->info0,
+ RX_MPDU_DESC_INFO0_MSDU_COUNT);
+
+ buf_addr_info = (struct ath12k_buffer_addr *)&reo_ent_ring->buf_addr_info;
+
+ *paddr = (((u64)le32_get_bits(buf_addr_info->info1,
+ BUFFER_ADDR_INFO1_ADDR)) << 32) |
+ le32_get_bits(buf_addr_info->info0,
+ BUFFER_ADDR_INFO0_ADDR);
+
+ *sw_cookie = le32_get_bits(buf_addr_info->info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+ *rbm = le32_get_bits(buf_addr_info->info1,
+ BUFFER_ADDR_INFO1_RET_BUF_MGR);
+
+ *pp_buf_addr = (void *)buf_addr_info;
+}
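
The new helper recombines a DMA address that the descriptor splits across two little-endian words: the full low 32 bits in info0 and a short high field in info1. A minimal user-space sketch of the same recombination, with an assumed 8-bit high field rather than the exact ath12k mask:

#include <stdint.h>
#include <stdio.h>

#define ADDR_HI_MASK 0xffu	/* assumed: high address bits in info1[7:0] */

static uint64_t buf_addr_get(uint32_t info0, uint32_t info1)
{
	/* low 32 bits live in info0, the remaining bits in info1 */
	return ((uint64_t)(info1 & ADDR_HI_MASK) << 32) | info0;
}

int main(void)
{
	printf("paddr = 0x%llx\n",
	       (unsigned long long)buf_addr_get(0x89abcdefu, 0x12u));
	return 0;
}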
+
+void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
+ struct hal_rx_msdu_link *link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus)
+{
+ struct hal_rx_msdu_details *msdu_details = NULL;
+ struct rx_msdu_desc *msdu_desc_info = NULL;
+ u32 last = 0, first = 0;
+ u8 tmp = 0;
+ int i;
+
+	last = u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+	first = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ msdu_details = &link_desc->msdu_link[0];
+
+ for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
+ if (!i && le32_get_bits(msdu_details[i].buf_addr_info.info0,
+ BUFFER_ADDR_INFO0_ADDR) == 0)
+ break;
+ if (le32_get_bits(msdu_details[i].buf_addr_info.info0,
+ BUFFER_ADDR_INFO0_ADDR) == 0) {
+ msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
+ msdu_desc_info->info0 |= cpu_to_le32(last);
+ break;
+ }
+ msdu_desc_info = &msdu_details[i].rx_msdu_info;
+
+ if (!i)
+ msdu_desc_info->info0 |= cpu_to_le32(first);
+ else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
+ msdu_desc_info->info0 |= cpu_to_le32(last);
+ msdu_list->msdu_info[i].msdu_flags = le32_to_cpu(msdu_desc_info->info0);
+ msdu_list->msdu_info[i].msdu_len =
+ HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
+ msdu_list->sw_cookie[i] =
+ le32_get_bits(msdu_details[i].buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+ tmp = le32_get_bits(msdu_details[i].buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_RET_BUF_MGR);
+ msdu_list->paddr[i] =
+ ((u64)(le32_get_bits(msdu_details[i].buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_ADDR)) << 32) |
+ le32_get_bits(msdu_details[i].buf_addr_info.info0,
+ BUFFER_ADDR_INFO0_ADDR);
+ msdu_list->rbm[i] = tmp;
+ }
+ *num_msdus = i;
+}
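
ath12k_hal_rx_msdu_list_get() walks a fixed-size descriptor array, tags the first entry, tags the last valid entry (either the final slot or the one before a zero buffer address), and reports how many entries were valid. The same walk in a self-contained sketch, with invented flag bits:

#include <stdint.h>
#include <stdio.h>

#define NUM_DESC   6
#define FLAG_FIRST (1u << 0)	/* invented flag bits */
#define FLAG_LAST  (1u << 1)

struct desc { uint32_t addr; uint32_t flags; };

static int msdu_walk(struct desc *d)
{
	int i;

	for (i = 0; i < NUM_DESC; i++) {
		if (!d[i].addr) {
			/* a zero address ends the list early: the
			 * previous entry becomes the last MSDU */
			if (i)
				d[i - 1].flags |= FLAG_LAST;
			break;
		}
		if (!i)
			d[i].flags |= FLAG_FIRST;
		else if (i == NUM_DESC - 1)
			d[i].flags |= FLAG_LAST;
	}
	return i;	/* number of valid entries */
}

int main(void)
{
	struct desc d[NUM_DESC] = { {1, 0}, {2, 0}, {3, 0} };

	printf("msdus = %d\n", msdu_walk(d));	/* 3 */
	return 0;
}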
+
void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
- struct hal_wbm_release_ring *dst_desc,
- struct hal_wbm_release_ring *src_desc,
+ struct hal_wbm_release_ring *desc,
+ struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action)
{
- dst_desc->buf_addr_info = src_desc->buf_addr_info;
- dst_desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
- HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
- le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
- le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
- HAL_WBM_RELEASE_INFO0_DESC_TYPE);
+ desc->buf_addr_info = *buf_addr_info;
+ desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
+ le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
+ le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_RELEASE_INFO0_DESC_TYPE);
}
void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
@@ -851,3 +931,20 @@ void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
ring_hash_map);
}
+
+void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab)
+{
+ u32 val;
+
+ lockdep_assert_held(&ab->base_lock);
+ val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_ADDR(ab));
+
+ val |= u32_encode_bits(1, HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY);
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_ADDR(ab), val);
+
+ val &= ~HAL_REO_QDESC_ADDR_READ_CLEAR_QDESC_ARRAY;
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_ADDR(ab), val);
+}
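
The cache clear is a software pulse: read the register, set the READ_CLEAR bit and write it back, then clear the bit with a second write. Sketched below against a plain variable standing in for the MMIO register, with an assumed bit position:

#include <stdint.h>
#include <stdio.h>

#define QDESC_CLEAR_BIT (1u << 8)	/* assumed bit position */

static uint32_t fake_reg;		/* stands in for the MMIO register */

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

static void qdesc_cache_clear(void)
{
	uint32_t val = reg_read();

	val |= QDESC_CLEAR_BIT;		/* pulse the clear bit high... */
	reg_write(val);
	val &= ~QDESC_CLEAR_BIT;	/* ...then release it */
	reg_write(val);
}

int main(void)
{
	fake_reg = 0x5;
	qdesc_cache_clear();
	printf("reg = 0x%x\n", reg_read());	/* back to 0x5 */
	return 0;
}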
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index 6bdcd0867d86..a3ab588aae19 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -108,11 +108,12 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_DONE,
HAL_RX_MON_STATUS_BUF_DONE,
HAL_RX_MON_STATUS_BUF_ADDR,
+ HAL_RX_MON_STATUS_MPDU_START,
HAL_RX_MON_STATUS_MPDU_END,
HAL_RX_MON_STATUS_MSDU_END,
};
-#define HAL_RX_MAX_MPDU 256
+#define HAL_RX_MAX_MPDU 1024
#define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 5)
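
The bump to 1024 MPDUs grows the PPDU bitmap with it; the `>> 5` is a divide by 32, one u32 word per 32 MPDU bits. A quick sketch of indexing such a bitmap:

#include <stdint.h>
#include <stdio.h>

#define MAX_MPDU 1024
#define WORDS_PER_BITMAP (MAX_MPDU >> 5)	/* 32 MPDU bits per u32 */

int main(void)
{
	uint32_t bitmap[WORDS_PER_BITMAP] = {0};
	unsigned int mpdu = 77;

	bitmap[mpdu >> 5] |= 1u << (mpdu & 31);	/* mark MPDU 77 as seen */
	printf("words = %d, word[2] = 0x%x\n", WORDS_PER_BITMAP, bitmap[2]);
	return 0;
}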
struct hal_rx_user_status {
@@ -506,6 +507,18 @@ struct hal_rx_mpdu_start {
__le32 rsvd2[16];
} __packed;
+struct hal_rx_msdu_end {
+ __le32 info0;
+ __le32 rsvd0[9];
+ __le16 info00;
+ __le16 info01;
+ __le32 rsvd00[8];
+ __le32 info1;
+ __le32 rsvd1[10];
+ __le32 info2;
+ __le32 rsvd2;
+} __packed;
+
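hal_rx_msdu_end mirrors a hardware descriptor byte for byte, so any accidental padding would corrupt every field after it. A compile-time size check is a cheap guard; the sketch below re-declares an equivalent packed layout in plain C11 (the 128-byte total follows from the fields as listed):

#include <assert.h>
#include <stdint.h>

struct msdu_end_sketch {
	uint32_t info0;
	uint32_t rsvd0[9];
	uint16_t info00;
	uint16_t info01;
	uint32_t rsvd00[8];
	uint32_t info1;
	uint32_t rsvd1[10];
	uint32_t info2;
	uint32_t rsvd2;
} __attribute__((__packed__));

/* 32 x 32-bit words in total; a drifted layout fails the build */
static_assert(sizeof(struct msdu_end_sketch) == 32 * sizeof(uint32_t),
	      "msdu_end layout drifted");

int main(void)
{
	return 0;
}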
#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];
@@ -525,6 +538,7 @@ struct hal_rx_msdu_desc_info {
#define HAL_RX_NUM_MSDU_DESC 6
struct hal_rx_msdu_list {
struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u64 paddr[HAL_RX_NUM_MSDU_DESC];
u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
u8 rbm[HAL_RX_NUM_MSDU_DESC];
};
@@ -1128,8 +1142,8 @@ void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_ms
u32 *msdu_cookies,
enum hal_rx_buf_return_buf_manager *rbm);
void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
- struct hal_wbm_release_ring *dst_desc,
- struct hal_wbm_release_ring *src_desc,
+ struct hal_wbm_release_ring *desc,
+ struct ath12k_buffer_addr *buf_addr_info,
enum hal_wbm_rel_bm_act action);
void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
dma_addr_t paddr, u32 cookie, u8 manager);
@@ -1144,5 +1158,12 @@ int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
struct ath12k_buffer_addr *buff_addr,
dma_addr_t *paddr, u32 *cookie);
+void ath12k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie,
+ struct ath12k_buffer_addr **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt);
+void ath12k_hal_rx_msdu_list_get(struct ath12k *ar,
+ struct hal_rx_msdu_link *link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index a106ebed7870..7e2cf0fb2085 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -118,6 +118,10 @@ static const struct ath12k_hw_ops wcn7850_ops = {
#define ATH12K_TX_MON_RING_MASK_0 0x1
#define ATH12K_TX_MON_RING_MASK_1 0x2
+#define ATH12K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH12K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH12K_RX_MON_STATUS_RING_MASK_2 0x4
+
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = {
/* CE0: host->target HTC control and raw streams */
@@ -535,6 +539,217 @@ static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_wcn7850
},
};
+static const struct ce_pipe_config ath12k_target_ce_config_wlan_ipq5332[] = {
+ /* host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* target->host WMI + HTC control */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* Target -> host PKTLOG */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* Reserved for target autonomous HIF_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE7 Reserved for CV Prefetch */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE8 Reserved for target generic HIF memcpy */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+	/* CE9 WMI logging/CFR/Spectral/Radar */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* Unused TBD */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_NONE),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* Unused TBD */
+ {
+ .pipenum = __cpu_to_le32(11),
+ .pipedir = __cpu_to_le32(PIPEDIR_NONE),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_ipq5332[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(5),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
+ __cpu_to_le32(PIPEDIR_IN),
+ __cpu_to_le32(9),
+ },
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
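
Like the other service maps, the IPQ5332 table is scanned linearly and the all-zero entry terminates the walk. A sketch of that lookup against a tiny table with placeholder service and direction IDs:

#include <stdint.h>
#include <stdio.h>

struct svc_to_pipe { uint32_t svc, dir, pipe; };

/* placeholder IDs; the zero-filled terminator ends the table */
static const struct svc_to_pipe map[] = {
	{ 1 /* WMI_DATA_VO */, 0 /* OUT */, 3 },
	{ 1 /* WMI_DATA_VO */, 1 /* IN  */, 2 },
	{ 0, 0, 0 },	/* must be last */
};

static int svc_lookup(uint32_t svc, uint32_t dir, uint32_t *pipe)
{
	const struct svc_to_pipe *e;

	for (e = map; e->svc; e++) {
		if (e->svc == svc && e->dir == dir) {
			*pipe = e->pipe;
			return 0;
		}
	}
	return -1;	/* no mapping */
}

int main(void)
{
	uint32_t pipe;

	if (!svc_lookup(1, 1, &pipe))
		printf("service 1, IN -> CE pipe %u\n", pipe);
	return 0;
}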
+
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
.tx = {
ATH12K_TX_RING_MASK_0,
@@ -577,6 +792,46 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
},
};
+static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_ipq5332 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
+ ATH12K_TX_RING_MASK_2,
+ ATH12K_TX_RING_MASK_3,
+ },
+ .rx_mon_dest = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ ATH12K_RX_MON_RING_MASK_0,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH12K_HOST2RXDMA_RING_MASK_0,
+ },
+ .tx_mon_dest = {
+ ATH12K_TX_MON_RING_MASK_0,
+ ATH12K_TX_MON_RING_MASK_1,
+ },
+};
+
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.tx = {
ATH12K_TX_RING_MASK_0,
@@ -585,6 +840,12 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
},
.rx_mon_dest = {
},
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH12K_RX_MON_STATUS_RING_MASK_0,
+ ATH12K_RX_MON_STATUS_RING_MASK_1,
+ ATH12K_RX_MON_STATUS_RING_MASK_2,
+ },
.rx = {
0, 0, 0,
ATH12K_RX_RING_MASK_0,
@@ -619,6 +880,9 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = {
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
+ .hal_tcl1_ring_base_lsb = 0x00000900,
+ .hal_tcl1_ring_base_msb = 0x00000904,
+ .hal_tcl2_ring_base_lsb = 0x00000978,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
@@ -681,6 +945,12 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = {
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000a84,
+
+ /* CE base address */
+ .hal_umac_ce0_src_reg_base = 0x01b80000,
+ .hal_umac_ce0_dest_reg_base = 0x01b81000,
+ .hal_umac_ce1_src_reg_base = 0x01b82000,
+ .hal_umac_ce1_dest_reg_base = 0x01b83000,
};
static const struct ath12k_hw_regs qcn9274_v2_regs = {
@@ -695,6 +965,9 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
+ .hal_tcl1_ring_base_lsb = 0x00000900,
+ .hal_tcl1_ring_base_msb = 0x00000904,
+ .hal_tcl2_ring_base_lsb = 0x00000978,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
@@ -734,6 +1007,8 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
.hal_reo1_sw_cookie_cfg1 = 0x00000070,
.hal_reo1_qdesc_lut_base0 = 0x00000074,
.hal_reo1_qdesc_lut_base1 = 0x00000078,
+ .hal_reo1_qdesc_addr = 0x0000007c,
+ .hal_reo1_qdesc_max_peerid = 0x00000088,
.hal_reo1_ring_base_lsb = 0x00000500,
.hal_reo1_ring_base_msb = 0x00000504,
.hal_reo1_ring_id = 0x00000508,
@@ -761,6 +1036,100 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000aa0,
+
+ /* CE base address */
+ .hal_umac_ce0_src_reg_base = 0x01b80000,
+ .hal_umac_ce0_dest_reg_base = 0x01b81000,
+ .hal_umac_ce1_src_reg_base = 0x01b82000,
+ .hal_umac_ce1_dest_reg_base = 0x01b83000,
+};
+
+static const struct ath12k_hw_regs ipq5332_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_id = 0x00000918,
+ .hal_tcl1_ring_misc = 0x00000920,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000092c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000930,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000940,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000944,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000958,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000095c,
+ .hal_tcl1_ring_base_lsb = 0x00000910,
+ .hal_tcl1_ring_base_msb = 0x00000914,
+ .hal_tcl1_ring_msi1_data = 0x00000960,
+ .hal_tcl2_ring_base_lsb = 0x00000988,
+ .hal_tcl_ring_base_lsb = 0x00000b68,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000d48,
+
+ /* REO DEST ring address */
+ .hal_reo2_ring_base = 0x00000578,
+ .hal_reo1_misc_ctrl_addr = 0x00000b9c,
+ .hal_reo1_sw_cookie_cfg0 = 0x0000006c,
+ .hal_reo1_sw_cookie_cfg1 = 0x00000070,
+ .hal_reo1_qdesc_lut_base0 = 0x00000074,
+ .hal_reo1_qdesc_lut_base1 = 0x00000078,
+ .hal_reo1_ring_base_lsb = 0x00000500,
+ .hal_reo1_ring_base_msb = 0x00000504,
+ .hal_reo1_ring_id = 0x00000508,
+ .hal_reo1_ring_misc = 0x00000510,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000514,
+ .hal_reo1_ring_hp_addr_msb = 0x00000518,
+ .hal_reo1_ring_producer_int_setup = 0x00000524,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000548,
+ .hal_reo1_ring_msi1_base_msb = 0x0000054C,
+ .hal_reo1_ring_msi1_data = 0x00000550,
+ .hal_reo1_aging_thres_ix0 = 0x00000B28,
+ .hal_reo1_aging_thres_ix1 = 0x00000B2C,
+ .hal_reo1_aging_thres_ix2 = 0x00000B30,
+ .hal_reo1_aging_thres_ix3 = 0x00000B34,
+
+ /* REO Exception ring address */
+ .hal_reo2_sw0_ring_base = 0x000008c0,
+
+ /* REO Reinject ring address */
+ .hal_sw2reo_ring_base = 0x00000320,
+ .hal_sw2reo1_ring_base = 0x00000398,
+
+ /* REO cmd ring address */
+ .hal_reo_cmd_ring_base = 0x000002A8,
+
+ /* REO status ring address */
+ .hal_reo_status_ring_base = 0x00000aa0,
+
+ /* WBM idle link ring address */
+ .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
+ .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
+ .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .hal_wbm_r0_idle_list_size_addr = 0x00000244,
+ .hal_wbm_scattered_ring_base_lsb = 0x00000250,
+ .hal_wbm_scattered_ring_base_msb = 0x00000254,
+ .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+ /* SW2WBM release ring address */
+ .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
+
+ /* WBM2SW release ring address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000e08,
+ .hal_wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PPE release ring address */
+ .hal_ppe_rel_ring_base = 0x0000046c,
+
+ /* CE address */
+ .hal_umac_ce0_src_reg_base = 0x00740000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .hal_umac_ce0_dest_reg_base = 0x00741000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .hal_umac_ce1_src_reg_base = 0x00742000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .hal_umac_ce1_dest_reg_base = 0x00743000 -
+ HAL_IPQ5332_CE_WFSS_REG_BASE,
};
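
Unlike the PCI chips, the IPQ5332 CE bases are stored pre-subtracted by HAL_IPQ5332_CE_WFSS_REG_BASE, i.e. as offsets into the remapped CE window rather than absolute bus addresses. The arithmetic in isolation, with the window base assumed from the values above:

#include <stdint.h>
#include <stdio.h>

#define CE_WFSS_REG_BASE 0x00740000u	/* assumed remap window start */

/* CE bases are stored relative to the remapped window, so a read at
 * "base + offset" stays inside the single ioremap of the CE block */
static uint32_t ce_offset(uint32_t absolute)
{
	return absolute - CE_WFSS_REG_BASE;
}

int main(void)
{
	printf("ce0 src offset = 0x%x\n", ce_offset(0x00740000u));	/* 0x0 */
	printf("ce1 src offset = 0x%x\n", ce_offset(0x00742000u));	/* 0x2000 */
	return 0;
}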
static const struct ath12k_hw_regs wcn7850_regs = {
@@ -775,6 +1144,9 @@ static const struct ath12k_hw_regs wcn7850_regs = {
.hal_tcl1_ring_msi1_base_msb = 0x0000094c,
.hal_tcl1_ring_msi1_data = 0x00000950,
.hal_tcl_ring_base_lsb = 0x00000b58,
+ .hal_tcl1_ring_base_lsb = 0x00000900,
+ .hal_tcl1_ring_base_msb = 0x00000904,
+ .hal_tcl2_ring_base_lsb = 0x00000978,
/* TCL STATUS ring address */
.hal_tcl_status_ring_base_lsb = 0x00000d38,
@@ -837,6 +1209,12 @@ static const struct ath12k_hw_regs wcn7850_regs = {
/* REO status ring address */
.hal_reo_status_ring_base = 0x00000a84,
+
+ /* CE base address */
+ .hal_umac_ce0_src_reg_base = 0x01b80000,
+ .hal_umac_ce0_dest_reg_base = 0x01b81000,
+ .hal_umac_ce1_src_reg_base = 0x01b82000,
+ .hal_umac_ce1_dest_reg_base = 0x01b83000,
};
static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
@@ -856,6 +1234,26 @@ static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
};
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_ipq5332 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+static const struct ce_ie_addr ath12k_ce_ie_addr_ipq5332 = {
+ .ie1_reg_addr = CE_HOST_IE_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .ie2_reg_addr = CE_HOST_IE_2_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .ie3_reg_addr = CE_HOST_IE_3_ADDRESS - HAL_IPQ5332_CE_WFSS_REG_BASE,
+};
+
+static const struct ce_remap ath12k_ce_remap_ipq5332 = {
+ .base = HAL_IPQ5332_CE_WFSS_REG_BASE,
+ .size = HAL_IPQ5332_CE_SIZE,
+};
+
static const struct ath12k_hw_params ath12k_hw_params[] = {
{
.name = "qcn9274 hw1.0",
@@ -864,6 +1262,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.dir = "QCN9274/hw1.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
},
.max_radios = 1,
.single_pdev_only = false,
@@ -899,7 +1298,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.download_calib = true,
.supports_suspend = false,
.tcl_ring_retry = true,
- .reoq_lut_support = false,
+ .reoq_lut_support = true,
.supports_shadow_regs = false,
.num_tcl_banks = 48,
@@ -932,6 +1331,14 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.iova_mask = 0,
.supports_aspm = false,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = false,
+
+ .dp_primary_link_only = true,
},
{
.name = "wcn7850 hw2.0",
@@ -941,6 +1348,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.dir = "WCN7850/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 256 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
},
.max_radios = 1,
@@ -972,7 +1380,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO),
- .supports_monitor = false,
+ .supports_monitor = true,
.idle_ps = true,
.download_calib = false,
@@ -1012,6 +1420,14 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
.supports_aspm = true,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = true,
+
+ .dp_primary_link_only = false,
},
{
.name = "qcn9274 hw2.0",
@@ -1020,6 +1436,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.dir = "QCN9274/hw2.0",
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_driver,
},
.max_radios = 2,
.single_pdev_only = false,
@@ -1049,7 +1466,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_AP_VLAN),
- .supports_monitor = false,
+ .supports_monitor = true,
.idle_ps = false,
.download_calib = true,
@@ -1088,6 +1505,92 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.iova_mask = 0,
.supports_aspm = false,
+
+ .ce_ie_addr = NULL,
+ .ce_remap = NULL,
+ .bdf_addr_offset = 0,
+
+ .current_cc_support = false,
+
+ .dp_primary_link_only = true,
+ },
+ {
+ .name = "ipq5332 hw1.0",
+ .hw_rev = ATH12K_HW_IPQ5332_HW10,
+ .fw = {
+ .dir = "IPQ5332/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ .m3_loader = ath12k_m3_fw_loader_remoteproc,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ5332,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .regs = &ipq5332_regs,
+ .ring_mask = &ath12k_hw_ring_mask_ipq5332,
+
+ .host_ce_config = ath12k_host_ce_config_ipq5332,
+ .ce_count = 12,
+ .target_ce_config = ath12k_target_ce_config_wlan_ipq5332,
+ .target_ce_count = 12,
+ .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_ipq5332,
+ .svc_to_ce_map_len = 18,
+
+ .hal_params = &ath12k_hw_hal_params_ipq5332,
+
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = false,
+
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+ .wmi_init = &ath12k_wmi_init_qcn9274,
+
+ .hal_ops = &hal_qcn9274_ops,
+
+ .qmi_cnss_feature_bitmap = BIT(CNSS_QDSS_CFG_MISS_V01),
+
+ .rfkill_pin = 0,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 0,
+
+ .rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = false,
+ .iova_mask = 0,
+ .supports_aspm = false,
+
+ .ce_ie_addr = &ath12k_ce_ie_addr_ipq5332,
+ .ce_remap = &ath12k_ce_remap_ipq5332,
+ .bdf_addr_offset = 0xC00000,
+
+ .dp_primary_link_only = true,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 8d52182e28ae..0fbc17649df4 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HW_H
@@ -97,6 +97,7 @@
#define ATH12K_REGDB_FILE_NAME "regdb.bin"
#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128
+#define ATH12K_IPQ5332_USERPD_ID 1
enum ath12k_hw_rate_cck {
ATH12K_HW_RATE_CCK_LP_11M = 0,
@@ -121,6 +122,7 @@ enum ath12k_hw_rate_ofdm {
enum ath12k_bus {
ATH12K_BUS_PCI,
+ ATH12K_BUS_AHB,
};
#define ATH12K_EXT_IRQ_GRP_NUM_MAX 11
@@ -133,6 +135,7 @@ enum hal_encrypt_type;
struct ath12k_hw_ring_mask {
u8 tx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_mon_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX];
u8 rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX];
@@ -146,6 +149,11 @@ struct ath12k_hw_hal_params {
u32 wbm2sw_cc_enable;
};
+enum ath12k_m3_fw_loaders {
+ ath12k_m3_fw_loader_driver,
+ ath12k_m3_fw_loader_remoteproc,
+};
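
The new enum records who loads the M3 firmware: the driver itself, or the platform's remoteproc as on AHB parts like IPQ5332. A sketch of the enum-keyed dispatch this enables, with hypothetical loader stubs:

#include <stdio.h>

enum m3_fw_loader { M3_LOADER_DRIVER, M3_LOADER_REMOTEPROC };

/* hypothetical stubs for the two ownership models */
static int m3_load_by_driver(void)  { puts("driver downloads m3.bin"); return 0; }
static int m3_skip_remoteproc(void) { puts("remoteproc owns the load"); return 0; }

static int m3_load(enum m3_fw_loader loader)
{
	switch (loader) {
	case M3_LOADER_DRIVER:
		return m3_load_by_driver();
	case M3_LOADER_REMOTEPROC:
		return m3_skip_remoteproc();
	}
	return -1;
}

int main(void)
{
	return m3_load(M3_LOADER_REMOTEPROC);
}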
+
struct ath12k_hw_params {
const char *name;
u16 hw_rev;
@@ -154,6 +162,7 @@ struct ath12k_hw_params {
const char *dir;
size_t board_size;
size_t cal_offset;
+ enum ath12k_m3_fw_loaders m3_loader;
} fw;
u8 max_radios;
@@ -190,6 +199,7 @@ struct ath12k_hw_params {
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
bool supports_aspm:1;
+ bool current_cc_support:1;
u32 num_tcl_banks;
u32 max_tx_ring;
@@ -220,6 +230,13 @@ struct ath12k_hw_params {
bool supports_dynamic_smps_6ghz;
u32 iova_mask;
+
+ const struct ce_ie_addr *ce_ie_addr;
+ const struct ce_remap *ce_remap;
+ u32 bdf_addr_offset;
+
+	/* set up REO queue, frag etc. only for the primary link peer */
+ bool dp_primary_link_only:1;
};
struct ath12k_hw_ops {
@@ -293,9 +310,15 @@ struct ath12k_hw_regs {
u32 hal_tcl1_ring_msi1_base_msb;
u32 hal_tcl1_ring_msi1_data;
u32 hal_tcl_ring_base_lsb;
+ u32 hal_tcl1_ring_base_lsb;
+ u32 hal_tcl1_ring_base_msb;
+ u32 hal_tcl2_ring_base_lsb;
u32 hal_tcl_status_ring_base_lsb;
+ u32 hal_reo1_qdesc_addr;
+ u32 hal_reo1_qdesc_max_peerid;
+
u32 hal_wbm_idle_ring_base_lsb;
u32 hal_wbm_idle_ring_misc_addr;
u32 hal_wbm_r0_idle_list_cntl_addr;
@@ -316,6 +339,11 @@ struct ath12k_hw_regs {
u32 pcie_qserdes_sysclk_en_sel;
u32 pcie_pcs_osc_dtct_config_base;
+ u32 hal_umac_ce0_src_reg_base;
+ u32 hal_umac_ce0_dest_reg_base;
+ u32 hal_umac_ce1_src_reg_base;
+ u32 hal_umac_ce1_dest_reg_base;
+
u32 hal_ppe_rel_ring_base;
u32 hal_reo2_ring_base;
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index dfa05f0ee6c9..88b59f3ff87a 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -229,7 +229,8 @@ ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = {
.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO,
.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
@@ -580,14 +581,22 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
return 0;
}
-static struct ath12k_link_vif *ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif)
+static struct ath12k_link_vif *
+ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif,
+ struct ieee80211_bss_conf *link_conf)
{
+ struct ieee80211_bss_conf *tx_bss_conf;
+ struct ath12k *ar = arvif->ar;
struct ath12k_vif *tx_ahvif;
- if (arvif->ahvif->vif->mbssid_tx_vif) {
- tx_ahvif = ath12k_vif_to_ahvif(arvif->ahvif->vif->mbssid_tx_vif);
- if (tx_ahvif)
- return &tx_ahvif->deflink;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ tx_bss_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ link_conf->tx_bss_conf);
+ if (tx_bss_conf) {
+ tx_ahvif = ath12k_vif_to_ahvif(tx_bss_conf->vif);
+ return wiphy_dereference(tx_ahvif->ah->hw->wiphy,
+ tx_ahvif->link[tx_bss_conf->link_id]);
}
return NULL;
@@ -874,12 +883,12 @@ static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BA
{
switch (band1) {
case NL80211_BAND_2GHZ:
- if (band2 & WMI_HOST_WLAN_2G_CAP)
+ if (band2 & WMI_HOST_WLAN_2GHZ_CAP)
return true;
break;
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
- if (band2 & WMI_HOST_WLAN_5G_CAP)
+ if (band2 & WMI_HOST_WLAN_5GHZ_CAP)
return true;
break;
default:
@@ -980,7 +989,7 @@ static int ath12k_mac_txpower_recalc(struct ath12k *ar)
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
txpower / 2);
- if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP) &&
ar->txpower_limit_2g != txpower) {
param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
ret = ath12k_wmi_pdev_set_param(ar, param,
@@ -990,7 +999,7 @@ static int ath12k_mac_txpower_recalc(struct ath12k *ar)
ar->txpower_limit_2g = txpower;
}
- if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP) &&
ar->txpower_limit_5g != txpower) {
param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
ret = ath12k_wmi_pdev_set_param(ar, param,
@@ -1245,61 +1254,6 @@ static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
-{
- struct ath12k_pdev *pdev = ar->pdev;
- struct ath12k_wmi_vdev_create_arg arg = {};
- int bit, ret;
- u8 tmp_addr[6];
-
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
-
- if (ar->monitor_vdev_created)
- return 0;
-
- if (ar->ab->free_vdev_map == 0) {
- ath12k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
- return -ENOMEM;
- }
-
- bit = __ffs64(ar->ab->free_vdev_map);
-
- ar->monitor_vdev_id = bit;
-
- arg.if_id = ar->monitor_vdev_id;
- arg.type = WMI_VDEV_TYPE_MONITOR;
- arg.subtype = WMI_VDEV_SUBTYPE_NONE;
- arg.pdev_id = pdev->pdev_id;
- arg.if_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
-
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
- arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
- arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
- }
-
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
- arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
- arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
- }
-
- ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg);
- if (ret) {
- ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
- ar->monitor_vdev_id, ret);
- ar->monitor_vdev_id = -1;
- return ret;
- }
-
- ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
- ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
- ar->num_created_vdevs++;
- ar->monitor_vdev_created = true;
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n",
- ar->monitor_vdev_id);
-
- return 0;
-}
-
static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
{
int ret;
@@ -1336,19 +1290,9 @@ static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
return ret;
}
-static void
-ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
- struct ieee80211_chanctx_conf *conf,
- void *data)
-{
- struct cfg80211_chan_def **def = data;
-
- *def = &conf->def;
-}
-
static int ath12k_mac_monitor_start(struct ath12k *ar)
{
- struct cfg80211_chan_def *chandef = NULL;
+ struct ath12k_mac_get_any_chanctx_conf_arg arg;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -1356,25 +1300,33 @@ static int ath12k_mac_monitor_start(struct ath12k *ar)
if (ar->monitor_started)
return 0;
+ arg.ar = ar;
+ arg.chanctx_conf = NULL;
ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
- ath12k_mac_get_any_chandef_iter,
- &chandef);
- if (!chandef)
+ ath12k_mac_get_any_chanctx_conf_iter,
+ &arg);
+ if (!arg.chanctx_conf)
return 0;
- ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id,
+ &arg.chanctx_conf->def);
if (ret) {
ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
- ath12k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "fail to set monitor filter: %d\n", ret);
return ret;
}
ar->monitor_started = true;
ar->num_started_vdevs++;
- ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);
- return ret;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
}
static int ath12k_mac_monitor_stop(struct ath12k *ar)
@@ -1440,58 +1392,9 @@ err:
return ret;
}
-static int ath12k_mac_config(struct ath12k *ar, u32 changed)
-{
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
- struct ieee80211_conf *conf = &hw->conf;
- int ret = 0;
-
- lockdep_assert_wiphy(hw->wiphy);
-
- if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
- if (ar->monitor_conf_enabled) {
- if (ar->monitor_vdev_created)
- return ret;
- ret = ath12k_mac_monitor_vdev_create(ar);
- if (ret)
- return ret;
- ret = ath12k_mac_monitor_start(ar);
- if (ret)
- goto err_mon_del;
- } else {
- if (!ar->monitor_vdev_created)
- return ret;
- ret = ath12k_mac_monitor_stop(ar);
- if (ret)
- return ret;
- ath12k_mac_monitor_vdev_delete(ar);
- }
- }
-
- return ret;
-
-err_mon_del:
- ath12k_mac_monitor_vdev_delete(ar);
- return ret;
-}
-
static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- int ret;
-
- lockdep_assert_wiphy(hw->wiphy);
-
- ar = ath12k_ah_to_ar(ah, 0);
-
- ret = ath12k_mac_config(ar, changed);
- if (ret)
- ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n",
- ar->pdev_idx, ret);
-
- return ret;
+ return 0;
}
static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_link_vif *arvif,
@@ -1715,7 +1618,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
return -ENOLINK;
}
- tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
if (tx_arvif != arvif && arvif->is_up)
return 0;
@@ -1785,6 +1688,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
{
struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *tx_arvif;
struct ath12k *ar = arvif->ar;
int ret;
@@ -1817,7 +1721,15 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
- tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab,
+ "unable to access bss link conf for link %u required to retrieve transmitting link conf\n",
+ arvif->link_id);
+ return;
+ }
+
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = info->bssid_index;
@@ -3027,6 +2939,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
const struct ieee80211_sta_eht_cap *eht_cap;
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *link_conf;
u32 *rx_mcs, *tx_mcs;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -3038,6 +2951,12 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
return;
}
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access link_conf in peer assoc eht set\n");
+ return;
+ }
+
eht_cap = &link_sta->eht_cap;
he_cap = &link_sta->he_cap;
if (!he_cap->has_he || !eht_cap->has_eht)
@@ -3109,6 +3028,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
}
arg->punct_bitmap = ~arvif->punct_bitmap;
+ arg->eht_disable_mcs15 = link_conf->eht_disable_mcs15;
}
static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
@@ -3139,6 +3059,7 @@ static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
ml->ml_peer_id = ahsta->ml_peer_id;
ml->ieee_link_id = arsta->link_id;
ml->num_partner_links = 0;
+ ml->eml_cap = sta->eml_cap;
links = ahsta->links_map;
rcu_read_lock();
@@ -3377,6 +3298,11 @@ static void ath12k_bss_assoc(struct ath12k *ar,
if (ret)
ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
arvif->vdev_id, ret);
+
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE)
+ ath12k_mac_11d_scan_stop_all(ar->ab);
}
static void ath12k_bss_disassoc(struct ath12k *ar,
@@ -3450,7 +3376,10 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
}
sband = hw->wiphy->bands[def->chan->band];
- basic_rate_idx = ffs(bss_conf->basic_rates) - 1;
+ if (bss_conf->basic_rates)
+ basic_rate_idx = __ffs(bss_conf->basic_rates);
+ else
+ basic_rate_idx = 0;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
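
The ffs() change matters because ffs(0) returns 0, so the old "ffs(basic_rates) - 1" produced index -1 whenever no basic rate was set; the new code guards the zero case and uses __ffs() on a known-nonzero mask. The hazard in plain C:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* ffs(0) returns 0, so "ffs(x) - 1" yields -1 and indexes out of
 * bounds when no basic rate is set; guard the zero case first */
static int basic_rate_idx(unsigned int basic_rates)
{
	if (!basic_rates)
		return 0;	/* fall back to the lowest rate */
	return ffs(basic_rates) - 1;	/* lowest set bit, 0-based */
}

int main(void)
{
	printf("rates=0x0 -> idx %d\n", basic_rate_idx(0x0));	/* 0 */
	printf("rates=0xc -> idx %d\n", basic_rate_idx(0xc));	/* 2 */
	return 0;
}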
@@ -3495,6 +3424,9 @@ static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif,
arvif->ahvif = ahvif;
arvif->link_id = _link_id;
+	/* Protects the datapath stats update on a per-link basis */
+ spin_lock_init(&arvif->link_stats_lock);
+
INIT_LIST_HEAD(&arvif->list);
INIT_DELAYED_WORK(&arvif->connection_loss_work,
ath12k_mac_vif_sta_connection_loss_work);
@@ -3534,6 +3466,11 @@ static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
arvif->vdev_id, arvif->link_id);
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE)
+ ath12k_mac_11d_scan_stop(ar);
+
if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
if (ret)
@@ -3556,23 +3493,18 @@ static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
if (arvif)
return arvif;
- if (!vif->valid_links) {
- /* Use deflink for Non-ML VIFs and mark the link id as 0
- */
- link_id = 0;
+ /* If this is the first link arvif being created for an ML VIF
+ * use the preallocated deflink memory except for scan arvifs
+ */
+ if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
arvif = &ahvif->deflink;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ arvif->is_sta_assoc_link = true;
} else {
- /* If this is the first link arvif being created for an ML VIF
- * use the preallocated deflink memory except for scan arvifs
- */
- if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
- arvif = &ahvif->deflink;
- } else {
- arvif = (struct ath12k_link_vif *)
- kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
- if (!arvif)
- return NULL;
- }
+ arvif = kzalloc(sizeof(*arvif), GFP_KERNEL);
+ if (!arvif)
+ return NULL;
}
ath12k_mac_init_arvif(ahvif, arvif, link_id);
@@ -3702,6 +3634,8 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
unsigned long links = ahvif->links_map;
struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
struct ath12k *ar;
u8 link_id;
@@ -3714,6 +3648,35 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+			/* we can only get here in station mode, so it's safe
+ * to use ap_addr
+ */
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+ if (!sta) {
+ rcu_read_unlock();
+ WARN_ONCE(1, "failed to find sta with addr %pM\n",
+ vif->cfg.ap_addr);
+ return;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arvif = wiphy_dereference(hw->wiphy,
+ ahvif->link[ahsta->assoc_link_id]);
+ rcu_read_unlock();
+
+ ar = arvif->ar;
+			/* an assoc link's bss info is guaranteed to exist
+			 * at this point
+			 */
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ ath12k_bss_assoc(ar, arvif, info);
+
+ /* exclude assoc link as it is done above */
+ links &= ~BIT(ahsta->assoc_link_id);
+ }
+
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
if (!arvif || !arvif->ar)
@@ -3789,6 +3752,18 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
psmode, arvif->vdev_id, ret);
}
+static bool ath12k_mac_supports_station_tpc(struct ath12k *ar,
+ struct ath12k_vif *ahvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath12k_wmi_supports_6ghz_cc_ext(ar) &&
+ test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ chandef->chan &&
+ chandef->chan->band == NL80211_BAND_6GHZ;
+}
+
static void ath12k_mac_bss_info_changed(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *info,
@@ -3983,12 +3958,16 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
band = def.chan->band;
mcast_rate = info->mcast_rate[band];
- if (mcast_rate > 0)
+ if (mcast_rate > 0) {
rateidx = mcast_rate - 1;
- else
- rateidx = ffs(info->basic_rates) - 1;
+ } else {
+ if (info->basic_rates)
+ rateidx = __ffs(info->basic_rates);
+ else
+ rateidx = 0;
+ }
- if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP)
rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
bitrate = ath12k_legacy_rates[rateidx].bitrate;
@@ -4162,9 +4141,9 @@ ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
* split the hw request and perform multiple scans
*/
- if (center_freq < ATH12K_MIN_5G_FREQ)
+ if (center_freq < ATH12K_MIN_5GHZ_FREQ)
band = NL80211_BAND_2GHZ;
- else if (center_freq < ATH12K_MIN_6G_FREQ)
+ else if (center_freq < ATH12K_MIN_6GHZ_FREQ)
band = NL80211_BAND_5GHZ;
else
band = NL80211_BAND_6GHZ;
@@ -4194,7 +4173,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
fallthrough;
case ATH12K_SCAN_STARTING:
cancel_delayed_work(&ar->scan.timeout);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
wiphy_work_queue(ar->ah->hw->wiphy, &ar->scan.vdev_clean_wk);
break;
}
@@ -4376,6 +4355,145 @@ static int ath12k_start_scan(struct ath12k *ar,
return 0;
}
+int ath12k_mac_get_fw_stats(struct ath12k *ar,
+ struct ath12k_fw_stats_req_params *param)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ unsigned long timeout, time_left;
+ int ret;
+
+ guard(mutex)(&ah->hw_mutex);
+
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marking for the back-to-back
+	 * received 'update stats' events, we keep a 3 second timeout in case
+	 * fw_stats_done is not marked yet.
+ */
+ timeout = jiffies + msecs_to_jiffies(3 * 1000);
+ ath12k_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
+ param->vdev_id, param->pdev_id);
+
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ param->pdev_id, param->vdev_id, param->stats_id);
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
+ if (!time_left) {
+ ath12k_warn(ab, "time out while waiting for get fw stats\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
+ * when stats data buffer limit is reached. fw_stats_complete
+	 * is completed once the host receives the first event from firmware,
+	 * but the end might still not be marked in the TLV.
+	 * The loop below confirms that firmware has finished sending all the
+	 * events; fw_stats_done is set true once the end is marked in the TLV.
+ */
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+ spin_lock_bh(&ar->data_lock);
+ if (ar->fw_stats.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
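
The completion only signals the first stats event, so the function then spins until fw_stats_done or the 3 s deadline. The same bounded-poll shape in user space, using CLOCK_MONOTONIC in place of jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static volatile bool stats_done;	/* set by the event handler */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static bool wait_stats_done(long timeout_ms)
{
	long deadline = now_ms() + timeout_ms;

	while (!stats_done) {
		if (now_ms() > deadline)
			return false;	/* gave up, like the jiffies check */
	}
	return true;
}

int main(void)
{
	stats_done = true;	/* pretend the end marker already arrived */
	printf("done = %d\n", wait_stats_done(3000));
	return 0;
}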
+
+static int ath12k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id,
+ int *dbm)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_fw_stats_req_params params = {};
+ struct ath12k_fw_stats_pdev *pdev;
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_base *ab;
+ struct ath12k *ar;
+ int ret;
+
+ /* Final Tx power is minimum of Target Power, CTL power, Regulatory
+	 * Power, PSD EIRP Power. We only know the Regulatory power from the
+	 * regulatory rules obtained. FW knows all of these powers and sets the
+	 * minimum of them. Hence, we request the FW pdev stats, in which FW
+	 * reports the minimum of all vdevs' channel Tx power.
+ */
+ lockdep_assert_wiphy(hw->wiphy);
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || !arvif->ar)
+ return -EINVAL;
+
+ ar = arvif->ar;
+ ab = ar->ab;
+ if (ah->state != ATH12K_HW_STATE_ON)
+ goto err_fallback;
+
+ if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags))
+ return -EAGAIN;
+
+	/* Rate-limit the tx power requests to firmware */
+ if (ar->chan_tx_pwr != ATH12K_PDEV_TX_POWER_INVALID &&
+ time_before(jiffies,
+ msecs_to_jiffies(ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS) +
+ ar->last_tx_power_update))
+ goto send_tx_power;
+
+ params.pdev_id = ar->pdev->pdev_id;
+ params.vdev_id = arvif->vdev_id;
+ params.stats_id = WMI_REQUEST_PDEV_STAT;
+ ret = ath12k_mac_get_fw_stats(ar, &params);
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_fallback;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+ struct ath12k_fw_stats_pdev, list);
+ if (!pdev) {
+ spin_unlock_bh(&ar->data_lock);
+ goto err_fallback;
+ }
+
+ /* tx power reported by firmware is in units of 0.5 dBm */
+ ar->chan_tx_pwr = pdev->chan_tx_power / 2;
+ spin_unlock_bh(&ar->data_lock);
+ ar->last_tx_power_update = jiffies;
+
+send_tx_power:
+ *dbm = ar->chan_tx_pwr;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower fetched from firmware %d dBm\n",
+ *dbm);
+ return 0;
+
+err_fallback:
+	/* We didn't get txpower from FW. Fall back to vif->bss_conf.txpower */
+ *dbm = vif->bss_conf.txpower;
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower not available from firmware, reported %d dBm\n",
+ *dbm);
+ return 0;
+}
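
get_txpower is effectively a time-bounded cache: serve ar->chan_tx_pwr while it is fresh, otherwise refresh it from the firmware pdev stats and restamp. A compact sketch of that pattern with an assumed refresh interval and a stub firmware query:

#include <stdio.h>
#include <time.h>

#define REFRESH_MS  5000	/* assumed refresh interval */
#define PWR_INVALID (-1)

static int cached_dbm = PWR_INVALID;
static long last_update_ms;

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* stands in for the fw pdev stats round trip */
static int fetch_from_fw(void) { return 30; }

static int get_txpower(void)
{
	/* serve the cached value while it is still fresh */
	if (cached_dbm != PWR_INVALID &&
	    now_ms() - last_update_ms < REFRESH_MS)
		return cached_dbm;

	cached_dbm = fetch_from_fw();
	last_update_ms = now_ms();
	return cached_dbm;
}

int main(void)
{
	printf("txpower = %d dBm\n", get_txpower());
	return 0;
}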
+
static u8
ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
{
@@ -4429,7 +4547,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
return -EINVAL;
/* check if any of the links of ML VIF is already started on
- * radio(ar) correpsondig to given scan frequency and use it,
+ * radio(ar) corresponding to given scan frequency and use it,
* if not use scan link (link 15) for scan purpose.
*/
link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
@@ -4538,10 +4656,16 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
ret = ath12k_start_scan(ar, arg);
if (ret) {
- ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ if (ret == -EBUSY)
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "scan engine is busy 11d state %d\n", ar->state_11d);
+ else
+ ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH12K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
+ goto exit;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started");
@@ -4565,6 +4689,11 @@ exit:
kfree(arg);
}
+ if (ar->state_11d == ATH12K_11D_PREPARING &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE)
+ ath12k_mac_11d_scan_start(ar, arvif->vdev_id);
+
return ret;
}
@@ -4605,7 +4734,6 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
.macaddr = macaddr,
};
struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -4624,8 +4752,8 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
arg.key_cipher = WMI_CIPHER_AES_CCM;
- /* TODO: Re-check if flag is valid */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -4633,12 +4761,10 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
arg.key_txmic_len = 8;
arg.key_rxmic_len = 8;
break;
- case WLAN_CIPHER_SUITE_CCMP_256:
- arg.key_cipher = WMI_CIPHER_AES_CCM;
- break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = WMI_CIPHER_AES_GCM;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
default:
ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
@@ -4658,7 +4784,7 @@ install:
if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
return -ETIMEDOUT;
- if (ether_addr_equal(macaddr, vif->addr))
+ if (ether_addr_equal(macaddr, arvif->bssid))
ahvif->key_cipher = key->cipher;
return ar->install_key_status ? -EINVAL : 0;
@@ -5524,10 +5650,13 @@ static int ath12k_mac_station_add(struct ath12k *ar,
ar->max_num_stations);
goto exit;
}
- arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
- if (!arsta->rx_stats) {
- ret = -ENOMEM;
- goto dec_num_station;
+
+ if (ath12k_debugfs_is_extd_rx_stats_enabled(ar) && !arsta->rx_stats) {
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
}
peer_param.vdev_id = arvif->vdev_id;
@@ -5669,12 +5798,15 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state new_state)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_bss_conf *link_conf;
struct ath12k *ar = arvif->ar;
+ struct ath12k_reg_info *reg_info;
+ struct ath12k_base *ab = ar->ab;
int ret = 0;
lockdep_assert_wiphy(hw->wiphy);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
arsta->link_id, arsta->addr, old_state, new_state);
/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station
@@ -5684,7 +5816,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST)) {
ret = ath12k_mac_station_remove(ar, arvif, arsta);
if (ret) {
- ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ ath12k_warn(ab, "Failed to remove station: %pM for VDEV: %d\n",
arsta->addr, arvif->vdev_id);
goto exit;
}
@@ -5695,7 +5827,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
ret = ath12k_mac_station_add(ar, arvif, arsta);
if (ret)
- ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ ath12k_warn(ab, "Failed to add station: %pM for VDEV: %d\n",
arsta->addr, arvif->vdev_id);
/* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for
@@ -5708,7 +5840,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath12k_mac_station_assoc(ar, arvif, arsta, false);
if (ret)
- ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
+ ath12k_warn(ab, "Failed to associate station: %pM\n",
arsta->addr);
/* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
@@ -5717,9 +5849,21 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
ret = ath12k_mac_station_authorize(ar, arvif, arsta);
- if (ret)
- ath12k_warn(ar->ab, "Failed to authorize station: %pM\n",
+ if (ret) {
+ ath12k_warn(ab, "Failed to authorize station: %pM\n",
arsta->addr);
+ goto exit;
+ }
+
+ if (ath12k_wmi_supports_6ghz_cc_ext(ar) &&
+ arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ reg_info = ab->reg_info[ar->pdev_idx];
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "connection done, update reg rules\n");
+ ath12k_hw_to_ah(hw)->regd_updated = false;
+ ath12k_reg_handle_chan_list(ab, reg_info, arvif->ahvif->vdev_type,
+ link_conf->power_type);
+ }
/* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: station may be in removal,
* deauthorize it.
@@ -5738,7 +5882,7 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC)) {
ret = ath12k_mac_station_disassoc(ar, arvif, arsta);
if (ret)
- ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ ath12k_warn(ab, "Failed to disassociate station: %pM\n",
arsta->addr);
}
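
All of these branches follow mac80211's station state machine, which only ever moves one step at a time between NOTEXIST, NONE, AUTH, ASSOC and AUTHORIZED. A sketch of validating such single-step transitions:

#include <stdbool.h>
#include <stdio.h>

enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

/* mac80211 moves a station one step at a time, in either direction */
static bool valid_transition(enum sta_state old, enum sta_state new)
{
	return new == old + 1 || new + 1 == old;
}

int main(void)
{
	printf("%d\n", valid_transition(STA_ASSOC, STA_AUTHORIZED));	/* 1 */
	printf("%d\n", valid_transition(STA_NONE, STA_ASSOC));		/* 0 */
	return 0;
}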
@@ -5802,6 +5946,17 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
* link sta
*/
if (sta->mlo) {
+ /* For station mode, arvif->is_sta_assoc_link has been set when
+		 * vdev starts. Make sure the arvif/arsta pair has the same setting
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !arsta->arvif->is_sta_assoc_link) {
+ ath12k_hw_warn(ah, "failed to verify assoc link setting with link id %u\n",
+ link_id);
+ ret = -EINVAL;
+ goto exit;
+ }
+
arsta->is_assoc_link = true;
ahsta->assoc_link_id = link_id;
}
@@ -6475,7 +6630,7 @@ static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
- if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ if (cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
if (ht_cap_info)
@@ -6484,7 +6639,7 @@ static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
rate_cap_rx_chainmask);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
(ar->ab->hw_params->single_pdev_only ||
!ar->supports_6ghz)) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
@@ -6661,6 +6816,8 @@ static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap,
switch (iftype) {
case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[2] &=
+ ~IEEE80211_HE_MAC_CAP2_BCAST_TWT;
he_cap_elem->phy_cap_info[3] &=
~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
he_cap_elem->phy_cap_info[9] |=
@@ -6893,7 +7050,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
enum nl80211_band band;
int count;
- if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ if (cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
band = NL80211_BAND_2GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
ar->mac.iftype[band],
@@ -6903,7 +7060,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
count);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
band = NL80211_BAND_5GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
ar->mac.iftype[band],
@@ -6913,7 +7070,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
count);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
ar->supports_6ghz) {
band = NL80211_BAND_6GHZ;
count = ath12k_mac_copy_sband_iftype_data(ar, cap,
@@ -7041,14 +7198,17 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
{
struct ath12k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
struct ieee80211_tx_info *info;
+ enum hal_encrypt_type enctype;
+ unsigned int mic_len;
dma_addr_t paddr;
int buf_id;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ATH12K_SKB_CB(skb)->ar = ar;
+ skb_cb->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
@@ -7057,12 +7217,15 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
return -ENOSPC;
info = IEEE80211_SKB_CB(skb);
- if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ if ((skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ enctype = ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
+ mic_len = ath12k_dp_rx_crypto_mic_len(ar, enctype);
+ skb_put(skb, mic_len);
}
}
@@ -7073,7 +7236,7 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
goto err_free_idr;
}
- ATH12K_SKB_CB(skb)->paddr = paddr;
+ skb_cb->paddr = paddr;
ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
if (ret) {
@@ -7084,7 +7247,7 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
return 0;
err_unmap_buf:
- dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+ dma_unmap_single(ab->dev, skb_cb->paddr,
skb->len, DMA_TO_DEVICE);
err_free_idr:
spin_lock_bh(&ar->txmgmt_idr_lock);
@@ -7337,12 +7500,18 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ath12k_peer *peer;
unsigned long links_map;
bool is_mcast = false;
+ bool is_dvlan = false;
struct ethhdr *eth;
bool is_prb_rsp;
u16 mcbc_gsn;
u8 link_id;
int ret;
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
@@ -7397,9 +7566,16 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
- if (!vif->valid_links || !is_mcast ||
+	/* Check whether this is a DVLAN frame */
+ if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control))
+ is_dvlan = true;
+
+ if (!vif->valid_links || !is_mcast || is_dvlan ||
test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
- ret = ath12k_dp_tx(ar, arvif, skb, false, 0);
+ ret = ath12k_dp_tx(ar, arvif, skb, false, 0, is_mcast);
if (unlikely(ret)) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->ah->hw, skb);
@@ -7429,11 +7605,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
info_flags);
skb_cb = ATH12K_SKB_CB(msdu_copied);
- info = IEEE80211_SKB_CB(msdu_copied);
skb_cb->link_id = link_id;
/* For open mode, skip peer find logic */
- if (unlikely(ahvif->key_cipher == WMI_CIPHER_NONE))
+ if (unlikely(!ahvif->key_cipher))
goto skip_peer_find;
spin_lock_bh(&tmp_ar->ab->base_lock);
@@ -7452,7 +7627,6 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
if (key) {
skb_cb->cipher = key->cipher;
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
- info->control.hw_key = key;
hdr = (struct ieee80211_hdr *)msdu_copied->data;
if (!ieee80211_has_protected(hdr->frame_control))
@@ -7463,7 +7637,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skip_peer_find:
ret = ath12k_dp_tx(tmp_ar, tmp_arvif,
- msdu_copied, true, mcbc_gsn);
+ msdu_copied, true, mcbc_gsn, is_mcast);
if (unlikely(ret)) {
if (ret == -ENOMEM) {
/* Drops are expected during heavy multicast
@@ -7549,7 +7723,7 @@ static int ath12k_mac_start(struct ath12k *ar)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ab, "failed to enable PMF QOS: (%d\n", ret);
+ ath12k_err(ab, "failed to enable PMF QOS: %d\n", ret);
goto err;
}
@@ -7594,12 +7768,13 @@ static int ath12k_mac_start(struct ath12k *ar)
/* TODO: Do we need to enable ANI? */
- ath12k_reg_update_chan_list(ar);
+ ath12k_reg_update_chan_list(ar, false);
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
ar->num_peers = 0;
ar->allocated_vdev_map = 0;
+ ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
/* Configure monitor status ring with default rx_filter to get rx status
* such as rssi, rx_duration.
@@ -7781,6 +7956,9 @@ static void ath12k_mac_stop(struct ath12k *ar)
wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->rfkill_work);
+ cancel_work_sync(&ar->ab->update_11d_work);
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
@@ -7854,7 +8032,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
return -ENOLINK;
}
- tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (!tx_arvif)
return 0;
@@ -7903,15 +8081,15 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
return ret;
}
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
}
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
}
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
ar->supports_6ghz) {
arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
@@ -7940,7 +8118,7 @@ ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
u32 *hecap_phy_ptr = NULL;
u32 hemode;
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP)
cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
else
cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
@@ -8071,44 +8249,121 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
ath12k_mac_update_vif_offload(&ahvif->deflink);
}
-int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
+static bool ath12k_mac_vif_ap_active_any(struct ath12k_base *ab)
{
- struct ath12k_hw *ah = ar->ah;
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ah->hw;
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
- struct ath12k_wmi_peer_create_arg peer_param = {0};
- struct ieee80211_bss_conf *link_conf;
- u32 param_id, param_value;
- u16 nss;
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ struct ath12k_link_vif *arvif;
int i;
- int ret, vdev_id;
- u8 link_id;
- lockdep_assert_wiphy(hw->wiphy);
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up &&
+ arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+ return true;
+ }
+ }
+ return false;
+}
- /* If no link is active and scan vdev is requested
- * use a default link conf for scan address purpose.
- */
- if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links)
- link_id = ffs(vif->valid_links) - 1;
- else
- link_id = arvif->link_id;
+void ath12k_mac_11d_scan_start(struct ath12k *ar, u32 vdev_id)
+{
+ struct wmi_11d_scan_start_arg arg;
+ int ret;
- link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
- if (!link_conf) {
- ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return -ENOLINK;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ar->regdom_set_by_user)
+ goto fin;
+
+ if (ar->vdev_id_11d_scan != ATH12K_11D_INVALID_VDEV_ID)
+ goto fin;
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ goto fin;
+
+ if (ath12k_mac_vif_ap_active_any(ar->ab))
+ goto fin;
+
+ arg.vdev_id = vdev_id;
+ arg.start_interval_msec = 0;
+ arg.scan_period_msec = ATH12K_SCAN_11D_INTERVAL;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac start 11d scan for vdev %d\n", vdev_id);
+
+ ret = ath12k_wmi_send_11d_scan_start_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = vdev_id;
+ if (ar->state_11d == ATH12K_11D_PREPARING)
+ ar->state_11d = ATH12K_11D_RUNNING;
}
- memcpy(arvif->bssid, link_conf->addr, ETH_ALEN);
+fin:
+ if (ar->state_11d == ATH12K_11D_PREPARING) {
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+}
- arvif->ar = ar;
- vdev_id = __ffs64(ab->free_vdev_map);
- arvif->vdev_id = vdev_id;
+void ath12k_mac_11d_scan_stop(struct ath12k *ar)
+{
+ int ret;
+ u32 vdev_id;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ return;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac stop 11d for vdev %d\n",
+ ar->vdev_id_11d_scan);
+
+ if (ar->state_11d == ATH12K_11D_PREPARING) {
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ if (ar->vdev_id_11d_scan != ATH12K_11D_INVALID_VDEV_ID) {
+ vdev_id = ar->vdev_id_11d_scan;
+
+ ret = ath12k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab,
+				    "failed to stop 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = ATH12K_11D_INVALID_VDEV_ID;
+ ar->state_11d = ATH12K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+ }
+}
+
+void ath12k_mac_11d_scan_stop_all(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mac stop soc 11d scan\n");
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ ath12k_mac_11d_scan_stop(ar);
+ }
+}
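
The helpers above drive a small per-radio 11d state machine. A sketch of the
intended transitions, using the state names from this patch (illustrative
only, not part of the diff):

/*
 * ATH12K_11D_IDLE
 *   -- STA vdev created with 11D offload service --> ATH12K_11D_PREPARING
 * ATH12K_11D_PREPARING
 *   -- ath12k_mac_11d_scan_start() sends the WMI command --> ATH12K_11D_RUNNING
 * ATH12K_11D_RUNNING / ATH12K_11D_PREPARING
 *   -- ath12k_mac_11d_scan_stop(), an AP vdev coming up, or mac stop
 *      --> ATH12K_11D_IDLE
 *
 * completed_11d_scan is completed on every transition back to IDLE so that
 * waiters (e.g. a pending hw scan) are never left blocked.
 */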
+
+static void ath12k_mac_determine_vdev_type(struct ieee80211_vif *vif,
+ struct ath12k_vif *ahvif)
+{
ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
switch (vif->type) {
@@ -8132,7 +8387,6 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
break;
case NL80211_IFTYPE_MONITOR:
ahvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
- ar->monitor_vdev_id = vdev_id;
break;
case NL80211_IFTYPE_P2P_DEVICE:
ahvif->vdev_type = WMI_VDEV_TYPE_STA;
@@ -8142,6 +8396,53 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
WARN_ON(1);
break;
}
+}
+
+int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
+{
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ah->hw;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
+ struct ieee80211_bss_conf *link_conf = NULL;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret, vdev_id;
+ u8 link_id;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+	/* With NO_VIRTUAL_MONITOR it is necessary to restrict each radio to
+	 * a single monitor interface
+	 */
+ if (vif->type == NL80211_IFTYPE_MONITOR && ar->monitor_vdev_created)
+ return -EINVAL;
+
+ link_id = arvif->link_id;
+
+ if (link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+ }
+
+ if (link_conf)
+ memcpy(arvif->bssid, link_conf->addr, ETH_ALEN);
+ else
+ memcpy(arvif->bssid, vif->addr, ETH_ALEN);
+
+ arvif->ar = ar;
+ vdev_id = __ffs64(ab->free_vdev_map);
+ arvif->vdev_id = vdev_id;
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ ar->monitor_vdev_id = vdev_id;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n",
arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype,
@@ -8205,6 +8506,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
arvif->vdev_id, ret);
goto err_peer_del;
}
+ ath12k_mac_11d_scan_stop_all(ar->ab);
break;
case WMI_VDEV_TYPE_STA:
param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
@@ -8243,12 +8545,26 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
arvif->vdev_id, ret);
goto err_peer_del;
}
+
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH12K_11D_PREPARING;
+ }
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ ar->monitor_vdev_created = true;
break;
default:
break;
}
- arvif->txpower = link_conf->txpower;
+ if (link_conf)
+ arvif->txpower = link_conf->txpower;
+ else
+ arvif->txpower = NL80211_TX_POWER_AUTOMATIC;
+
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
@@ -8263,8 +8579,6 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
}
ath12k_dp_vdev_tx_attach(ar, arvif);
- if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
- ath12k_mac_monitor_vdev_create(ar);
return ret;
@@ -8289,6 +8603,11 @@ err_peer_del:
}
err_vdev_del:
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ }
+
ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->num_created_vdevs--;
arvif->is_created = false;
@@ -8483,7 +8802,10 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_reg_info *reg_info;
struct ath12k_link_vif *arvif;
+ struct ath12k_base *ab;
+ struct ath12k *ar;
int i;
lockdep_assert_wiphy(hw->wiphy);
@@ -8502,6 +8824,22 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ ath12k_mac_determine_vdev_type(vif, ahvif);
+
+ for_each_ar(ah, ar, i) {
+ if (!ath12k_wmi_supports_6ghz_cc_ext(ar))
+ continue;
+
+ ab = ar->ab;
+ reg_info = ab->reg_info[ar->pdev_idx];
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "interface added to change reg rules\n");
+ ah->regd_updated = false;
+ ath12k_reg_handle_chan_list(ab, reg_info, ahvif->vdev_type,
+ IEEE80211_REG_UNSET_AP);
+ break;
+ }
+
/* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
* will not know if this interface is an ML vif at this point.
*/
@@ -8566,8 +8904,6 @@ static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arv
if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ar->monitor_vdev_id = -1;
ar->monitor_vdev_created = false;
- } else if (ar->monitor_vdev_created && !ar->monitor_started) {
- ret = ath12k_mac_monitor_vdev_delete(ar);
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
@@ -8798,6 +9134,7 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
*/
ar->rx_channel = ctx->def.chan;
spin_unlock_bh(&ar->data_lock);
+ ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
return 0;
}
@@ -8826,6 +9163,7 @@ static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
*/
ar->rx_channel = NULL;
spin_unlock_bh(&ar->data_lock);
+ ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
}
static enum wmi_phy_mode
@@ -8917,6 +9255,9 @@ ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
* link vdevs which are advertised as partners below
*/
ml_arg->link_add = true;
+
+ ml_arg->assoc_link = arvif->is_sta_assoc_link;
+
partner_info = ml_arg->partner_info;
links = ahvif->links_map;
@@ -9055,6 +9396,15 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
return ret;
}
+ /* TODO: For now we only set TPC power here. However when
+ * channel changes, say CSA, it should be updated again.
+ */
+ if (ath12k_mac_supports_station_tpc(ar, ahvif, chandef)) {
+ ath12k_mac_fill_reg_tpc_info(ar, arvif, ctx);
+ ath12k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
+ &arvif->reg_tpc_info);
+ }
+
ar->num_started_vdevs++;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
ahvif->vif->addr, arvif->vdev_id);
@@ -9257,8 +9607,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
ahvif->link[link_id]);
- if (vif->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
monitor_vif = true;
+ continue;
+ }
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
@@ -9308,7 +9660,7 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
- tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = link_conf->bssid_index;
@@ -9408,16 +9760,26 @@ static int ath12k_start_vdev_delay(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_chanctx_conf *chanctx;
+ struct ieee80211_bss_conf *link_conf;
int ret;
if (WARN_ON(arvif->is_started))
return -EBUSY;
- ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx);
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ab, "failed to get link conf for vdev %u\n", arvif->vdev_id);
+ return -EINVAL;
+ }
+
+ chanctx = wiphy_dereference(ath12k_ar_to_hw(arvif->ar)->wiphy,
+ link_conf->chanctx_conf);
+ ret = ath12k_mac_vdev_start(arvif, chanctx);
if (ret) {
ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
arvif->vdev_id, vif->addr,
- arvif->chanctx.def.chan->center_freq, ret);
+ chanctx->def.chan->center_freq, ret);
return ret;
}
@@ -9435,6 +9797,391 @@ static int ath12k_start_vdev_delay(struct ath12k *ar,
return 0;
}
+static u8 ath12k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
+{
+ if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 4;
+ case NL80211_CHAN_WIDTH_160:
+ return 8;
+ case NL80211_CHAN_WIDTH_320:
+ return 16;
+ default:
+ return 1;
+ }
+ } else {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ case NL80211_CHAN_WIDTH_320:
+ return 5;
+ default:
+ return 1;
+ }
+ }
+}
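
The switch statement above encodes two simple functions of the channel width:
one power level per 20 MHz subchannel for PSD channels, and one per
power-of-two segment width otherwise. A self-contained sketch of the same
mapping (hypothetical helper, not driver code):

#include <stdio.h>

/* Same counts as the switch above, derived from the bandwidth in MHz:
 * PSD:     bw / 20             (one level per 20 MHz subchannel)
 * non-PSD: log2(bw / 20) + 1   (20/40/.../bw segment widths)
 */
static unsigned int num_pwr_levels(unsigned int bw_mhz, int is_psd)
{
	unsigned int subchans = bw_mhz / 20;
	unsigned int levels = 0;

	if (is_psd)
		return subchans;

	while (subchans) {
		levels++;
		subchans >>= 1;
	}
	return levels;
}

int main(void)
{
	/* 160 MHz: 8 PSD levels, 4 non-PSD levels; 320 MHz: 16 and 5 */
	printf("%u %u %u %u\n",
	       num_pwr_levels(160, 1), num_pwr_levels(160, 0),
	       num_pwr_levels(320, 1), num_pwr_levels(320, 0));
	return 0;
}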
+
+static u16 ath12k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def)
+{
+ u16 diff_seq;
+
+	/* Get the center frequency of the lowest-numbered 20 MHz channel in
+	 * the chandef. For example,
+	 * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1
+	 * with center frequency 5955, its diff is 5965 - 5955 = 10.
+	 * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1
+	 * with center frequency 5955, its diff is 5985 - 5955 = 30.
+	 * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1
+	 * with center frequency 5955, its diff is 6025 - 5955 = 70.
+	 * bandwidth=320 MHz, center frequency is 6105, lowest channel is 1
+	 * with center frequency 5955, its diff is 6105 - 5955 = 150.
+	 */
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_320:
+ diff_seq = 150;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ diff_seq = 70;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ diff_seq = 30;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ diff_seq = 10;
+ break;
+ default:
+ diff_seq = 0;
+ }
+
+ return chan_def->center_freq1 - diff_seq;
+}
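
The diff_seq constants above have a closed form: the lowest subchannel center
lies half the bandwidth below the block center, offset back up by the 10 MHz
half-width of a 20 MHz channel, i.e. diff_seq = bw/2 - 10. A hypothetical
standalone check, not driver code:

#include <assert.h>

static unsigned int diff_seq_from_bw(unsigned int bw_mhz)
{
	return bw_mhz / 2 - 10;
}

int main(void)
{
	assert(diff_seq_from_bw(40) == 10);	/* 5965 - 5955 */
	assert(diff_seq_from_bw(80) == 30);	/* 5985 - 5955 */
	assert(diff_seq_from_bw(160) == 70);	/* 6025 - 5955 */
	assert(diff_seq_from_bw(320) == 150);	/* 6105 - 5955 */
	return 0;
}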
+
+static u16 ath12k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
+ u16 start_seq, u8 seq)
+{
+ u16 seg_seq;
+
+	/* Get the center frequency of a segment of the given bandwidth.
+	 * start_seq is the center frequency of the lowest-numbered channel;
+	 * seq 0/1/2/3 selects a 20 MHz/40 MHz/80 MHz/160 MHz segment.
+	 * For example, with lowest channel 1 (center frequency 5955):
+	 * the 20 MHz segment center is 5955, diff 5955 - 5955 = 0;
+	 * the 40 MHz segment center is 5965, diff 5965 - 5955 = 10;
+	 * the 80 MHz segment center is 5985, diff 5985 - 5955 = 30;
+	 * the 160 MHz segment center is 6025, diff 6025 - 5955 = 70.
+	 */
+ seg_seq = 10 * (BIT(seq) - 1);
+ return seg_seq + start_seq;
+}
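
The 10 * (BIT(seq) - 1) offset is the same quantity seen from the other side:
the center of a 20/40/80/160 MHz segment that starts at start_seq sits
bw/2 - 10 above the lowest subchannel center. A hypothetical cross-check:

#include <assert.h>

int main(void)
{
	unsigned int seq;

	for (seq = 0; seq < 4; seq++) {
		unsigned int bw = 20U << seq;	/* 20/40/80/160 MHz */

		/* offsets 0/10/30/70 MHz above start_seq */
		assert(10 * ((1U << seq) - 1) == bw / 2 - 10);
	}
	return 0;
}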
+
+static void ath12k_mac_get_psd_channel(struct ath12k *ar,
+ u16 step_freq,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ s8 *tx_power)
+{
+	/* Get the center frequency of each 20 MHz subchannel.
+	 * For example, a 160 MHz channel centered at 6025 contains eight
+	 * 20 MHz channels, 1/5/9/13/17/21/25/29. Channel 1's center
+	 * frequency, 5955, is the start_freq parameter, and i (0..7) steps
+	 * through the eight channels, whose centers are
+	 * 5955/5975/5995/6015/6035/6055/6075/6095: start_freq plus i times
+	 * step_freq, the 20 MHz gap between channels. With the center
+	 * frequency of a channel it is easy to look up its
+	 * struct ieee80211_channel and read max_reg_power.
+	 */
+ *center_freq = *start_freq + i * step_freq;
+ *temp_chan = ieee80211_get_channel(ar->ah->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+static void ath12k_mac_get_eirp_power(struct ath12k *ar,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ struct cfg80211_chan_def *def,
+ s8 *tx_power)
+{
+	/* Get the center frequency of the 20 MHz/40 MHz/80 MHz/160 MHz
+	 * segment, then add 10 MHz to land on the center frequency of a
+	 * channel number. For example, with configured channel 1:
+	 * at 40 MHz the segment center is 5965; plus 10 it is 5975,
+	 * which is channel 5;
+	 * at 80 MHz the segment center is 5985; plus 10 it is 5995,
+	 * which is channel 9;
+	 * at 160 MHz the segment center is 6025; plus 10 it is 6035,
+	 * which is channel 17.
+	 * With the center frequency of a channel it is easy to look up its
+	 * struct ieee80211_channel and read max_reg_power.
+	 */
+ *center_freq = ath12k_mac_get_seg_freq(def, *start_freq, i);
+
+	/* For 20 MHz the segment center already is the channel center */
+ if (i != 0)
+ *center_freq += 10;
+
+ *temp_chan = ieee80211_get_channel(ar->ah->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_bss_conf *bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ struct ieee80211_channel *chan, *temp_chan;
+ u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
+ bool is_psd_power = false, is_tpe_present = false;
+ s8 max_tx_power[ATH12K_NUM_PWR_LEVELS],
+ psd_power, tx_power, eirp_power;
+ u16 start_freq, center_freq;
+
+ chan = ctx->def.chan;
+ start_freq = ath12k_mac_get_6ghz_start_frequency(&ctx->def);
+ pwr_reduction = bss_conf->pwr_reduction;
+
+ if (arvif->reg_tpc_info.num_pwr_levels) {
+ is_tpe_present = true;
+ num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
+ } else {
+ num_pwr_levels = ath12k_mac_get_num_pwr_levels(&ctx->def);
+ }
+
+ for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
+		/* STA received TPE IE */
+ if (is_tpe_present) {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath12k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ psd_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ /* Connecting AP is not psd power */
+ } else {
+ ath12k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ /* convert psd power to EIRP power based
+ * on channel width
+ */
+ tx_power =
+ min_t(s8, tx_power,
+ psd_power + 13 + pwr_lvl_idx * 3);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ /* local power is not PSD power */
+ } else {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath12k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ reg_tpc_info->tpe[pwr_lvl_idx];
+ /* Connecting AP is not psd power */
+ } else {
+ ath12k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ }
+ /* STA not received TPE IE */
+ } else {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ is_psd_power = true;
+ ath12k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] = psd_power;
+ } else {
+ ath12k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] = tx_power;
+ }
+ }
+
+ if (is_psd_power) {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ eirp_power = eirp_power - pwr_reduction;
+
+			/* If the firmware-updated max tx power is non-zero, take
+			 * the min of the firmware-updated AP tx power and the max
+			 * power derived from the parameters above.
+			 */
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+				   "eirp power: %d firmware reported power: %d\n",
+ eirp_power, ar->max_allowed_tx_power);
+			/* Firmware reports a lower max_allowed_tx_power in the
+			 * vdev start response. On 6 GHz, firmware is not aware
+			 * of the EIRP power unless the driver sets it through
+			 * the WMI TPC command. So a radio which does not support
+			 * idle power save can send the maximum calculated EIRP
+			 * power directly to firmware through the TPC command,
+			 * without a min comparison against the vdev start
+			 * response's max_allowed_tx_power.
+			 */
+ if (ar->max_allowed_tx_power && ab->hw_params->idle_ps)
+ eirp_power = min_t(s8,
+ eirp_power,
+ ar->max_allowed_tx_power);
+ } else {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ max_tx_power[pwr_lvl_idx] =
+ max_tx_power[pwr_lvl_idx] - pwr_reduction;
+			/* If the firmware-updated max tx power is non-zero, take
+			 * the min of the firmware-updated AP tx power and the max
+			 * power derived from the parameters above.
+			 */
+ if (ar->max_allowed_tx_power && ab->hw_params->idle_ps)
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ max_tx_power[pwr_lvl_idx],
+ ar->max_allowed_tx_power);
+ }
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
+ max_tx_power[pwr_lvl_idx];
+ }
+
+ reg_tpc_info->num_pwr_levels = num_pwr_levels;
+ reg_tpc_info->is_psd_power = is_psd_power;
+ reg_tpc_info->eirp_power = eirp_power;
+ reg_tpc_info->ap_power_type =
+ ath12k_reg_ap_pwr_convert(bss_conf->power_type);
+}
+
+static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
+ struct ath12k_link_vif *arvif)
+{
+ struct ieee80211_bss_conf *bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ struct ath12k_reg_tpc_power_info *tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_parsed_tpe_eirp *local_non_psd, *reg_non_psd;
+ struct ieee80211_parsed_tpe_psd *local_psd, *reg_psd;
+ struct ieee80211_parsed_tpe *tpe = &bss_conf->tpe;
+ enum wmi_reg_6g_client_type client_type;
+ struct ath12k_reg_info *reg_info;
+ struct ath12k_base *ab = ar->ab;
+ bool psd_valid, non_psd_valid;
+ int i;
+
+ reg_info = ab->reg_info[ar->pdev_idx];
+ client_type = reg_info->client_type;
+
+ local_psd = &tpe->psd_local[client_type];
+ reg_psd = &tpe->psd_reg_client[client_type];
+ local_non_psd = &tpe->max_local[client_type];
+ reg_non_psd = &tpe->max_reg_client[client_type];
+
+ psd_valid = local_psd->valid | reg_psd->valid;
+ non_psd_valid = local_non_psd->valid | reg_non_psd->valid;
+
+ if (!psd_valid && !non_psd_valid) {
+ ath12k_warn(ab,
+			    "no transmit power envelope matches client power type %d\n",
+ client_type);
+ return;
+	}
+
+ if (psd_valid) {
+ tpc_info->is_psd_power = true;
+
+ tpc_info->num_pwr_levels = max(local_psd->count,
+ reg_psd->count);
+ if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
+ tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+
+ for (i = 0; i < tpc_info->num_pwr_levels; i++) {
+ tpc_info->tpe[i] = min(local_psd->power[i],
+ reg_psd->power[i]) / 2;
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "TPE PSD power[%d] : %d\n",
+ i, tpc_info->tpe[i]);
+ }
+ } else {
+ tpc_info->is_psd_power = false;
+ tpc_info->eirp_power = 0;
+
+ tpc_info->num_pwr_levels = max(local_non_psd->count,
+ reg_non_psd->count);
+ if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
+ tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+
+ for (i = 0; i < tpc_info->num_pwr_levels; i++) {
+ tpc_info->tpe[i] = min(local_non_psd->power[i],
+ reg_non_psd->power[i]) / 2;
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "non PSD power[%d] : %d\n",
+ i, tpc_info->tpe[i]);
+ }
+ }
+}
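
The min()/2 pattern above both merges the local and regulatory-client
envelopes and converts units: the Transmit Power Envelope fields are carried
in 0.5 dB steps (an assumption consistent with the halving here), so dividing
the raw value by two yields whole dBm. A minimal sketch of that conversion
(hypothetical helper, not part of the patch):

/* Take the stricter of the local and regulatory-client limits (raw s8
 * values in 0.5 dB units) and convert to whole dBm with the same integer
 * division used above.
 */
static s8 tpe_limit_dbm(s8 local_half_db, s8 reg_half_db)
{
	s8 raw = min(local_half_db, reg_half_db);

	return raw / 2;		/* e.g. raw 46 -> 23 dBm */
}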
+
static int
ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -9462,8 +10209,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
if (!ar) {
- ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started",
- vif->addr, link_id);
+ ath12k_hw_warn(ah, "failed to assign chanctx for vif %pM link id %u link vif is already started",
+ vif->addr, link_id);
return -EINVAL;
}
@@ -9473,6 +10220,11 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
+ if (ath12k_wmi_supports_6ghz_cc_ext(ar) &&
+ ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath12k_mac_parse_tx_pwr_env(ar, arvif);
+
arvif->punct_bitmap = ctx->def.punctured;
/* for some targets bss peer must be created before vdev_start */
@@ -9480,7 +10232,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
ahvif->vdev_type != WMI_VDEV_TYPE_AP &&
ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
!ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
- memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
}
@@ -9492,8 +10243,10 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_start(ar);
- if (ret)
+ if (ret) {
+ ath12k_mac_monitor_vdev_delete(ar);
goto out;
+ }
arvif->is_started = true;
goto out;
@@ -9507,9 +10260,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
- ath12k_mac_monitor_start(ar);
-
arvif->is_started = true;
/* TODO: Setup ps and cts/rts protection */
@@ -9572,9 +10322,18 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
arvif->is_started = false;
- if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
- ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
- ath12k_mac_monitor_stop(ar);
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map) &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ ar->state_11d != ATH12K_11D_PREPARING) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH12K_11D_PREPARING;
+ }
+
+ if (ar->scan.arvif == arvif && ar->scan.state == ATH12K_SCAN_RUNNING) {
+ ath12k_scan_abort(ar);
+ ar->scan.arvif = NULL;
+ }
}
static int
@@ -10136,6 +10895,14 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
+ if (ar->ab->hw_params->current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ struct wmi_set_current_country_arg arg = {};
+
+ memcpy(&arg.alpha2, ar->alpha2, 2);
+ ath12k_wmi_send_set_current_country_cmd(ar, &arg);
+ }
+
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
@@ -10267,46 +11034,13 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
-static int ath12k_mac_get_fw_stats(struct ath12k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
- unsigned long time_left;
- int ret;
-
- guard(mutex)(&ah->hw_mutex);
-
- if (ah->state != ATH12K_HW_STATE_ON)
- return -ENETDOWN;
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath12k_wmi_send_stats_request_cmd(ar, stats_id, vdev_id, pdev_id);
-
- if (ret) {
- ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
- return ret;
- }
-
- ath12k_dbg(ab, ATH12K_DBG_WMI,
- "get fw stat pdev id %d vdev id %d stats id 0x%x\n",
- pdev_id, vdev_id, stats_id);
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
-
- if (!time_left)
- ath12k_warn(ab, "time out while waiting for get fw stats\n");
-
- return ret;
-}
-
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_fw_stats_req_params params = {};
struct ath12k_link_sta *arsta;
struct ath12k *ar;
s8 signal;
@@ -10348,10 +11082,13 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
/* TODO: Use real NF instead of default one. */
signal = arsta->rssi_comb;
+ params.pdev_id = ar->pdev->pdev_id;
+ params.vdev_id = 0;
+ params.stats_id = WMI_REQUEST_VDEV_STAT;
+
if (!signal &&
ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
- !(ath12k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_VDEV_STAT)))
+ !(ath12k_mac_get_fw_stats(ar, &params)))
signal = arsta->rssi_beacon;
if (signal) {
@@ -10411,7 +11148,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
return -EINVAL;
/* check if any of the links of ML VIF is already started on
- * radio(ar) correpsondig to given scan frequency and use it,
+ * radio(ar) corresponding to given scan frequency and use it,
* if not use deflink(link 0) for scan purpose.
*/
@@ -10595,6 +11332,7 @@ static const struct ieee80211_ops ath12k_ops = {
.assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx,
.unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx,
.switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx,
+ .get_txpower = ath12k_mac_op_get_txpower,
.set_rts_threshold = ath12k_mac_op_set_rts_threshold,
.set_frag_threshold = ath12k_mac_op_set_frag_threshold,
.set_bitrate_mask = ath12k_mac_op_set_bitrate_mask,
@@ -10610,12 +11348,37 @@ static const struct ieee80211_ops ath12k_ops = {
.resume = ath12k_wow_op_resume,
.set_wakeup = ath12k_wow_op_set_wakeup,
#endif
+#ifdef CONFIG_ATH12K_DEBUGFS
+ .vif_add_debugfs = ath12k_debugfs_op_vif_add,
+#endif
CFG80211_TESTMODE_CMD(ath12k_tm_cmd)
#ifdef CONFIG_ATH12K_DEBUGFS
.link_sta_add_debugfs = ath12k_debugfs_link_sta_op_add,
#endif
};
+void ath12k_mac_update_freq_range(struct ath12k *ar,
+ u32 freq_low, u32 freq_high)
+{
+ if (!(freq_low && freq_high))
+ return;
+
+ if (ar->freq_range.start_freq || ar->freq_range.end_freq) {
+ ar->freq_range.start_freq = min(ar->freq_range.start_freq,
+ MHZ_TO_KHZ(freq_low));
+ ar->freq_range.end_freq = max(ar->freq_range.end_freq,
+ MHZ_TO_KHZ(freq_high));
+ } else {
+ ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
+ ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac pdev %u freq limit updated. New range %u->%u MHz\n",
+ ar->pdev->pdev_id, KHZ_TO_MHZ(ar->freq_range.start_freq),
+ KHZ_TO_MHZ(ar->freq_range.end_freq));
+}
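
Note the merge semantics: the first update seeds the pdev range, later
updates can only widen it (min of starts, max of ends), and the bounds are
kept in kHz. A self-contained sketch with hypothetical frequencies:

#include <stdio.h>

struct freq_range { unsigned int start_freq, end_freq; };	/* kHz */

static void update_range(struct freq_range *r, unsigned int lo_mhz,
			 unsigned int hi_mhz)
{
	if (!(lo_mhz && hi_mhz))
		return;

	if (r->start_freq || r->end_freq) {
		if (lo_mhz * 1000 < r->start_freq)
			r->start_freq = lo_mhz * 1000;
		if (hi_mhz * 1000 > r->end_freq)
			r->end_freq = hi_mhz * 1000;
	} else {
		r->start_freq = lo_mhz * 1000;
		r->end_freq = hi_mhz * 1000;
	}
}

int main(void)
{
	struct freq_range r = { 0, 0 };

	update_range(&r, 5150, 5850);	/* seeds 5150000..5850000 */
	update_range(&r, 5925, 7125);	/* widens the end to 7125000 */
	printf("%u..%u kHz\n", r.start_freq, r.end_freq);
	return 0;
}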
+
static void ath12k_mac_update_ch_list(struct ath12k *ar,
struct ieee80211_supported_band *band,
u32 freq_low, u32 freq_high)
@@ -10630,9 +11393,6 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar,
band->channels[i].center_freq > freq_high)
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
-
- ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
- ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
}
static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
@@ -10640,10 +11400,10 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
struct ath12k_pdev *pdev = ar->pdev;
struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
- if (band == WMI_HOST_WLAN_2G_CAP)
+ if (band == WMI_HOST_WLAN_2GHZ_CAP)
return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
- if (band == WMI_HOST_WLAN_5G_CAP)
+ if (band == WMI_HOST_WLAN_5GHZ_CAP)
return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band);
@@ -10657,18 +11417,19 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
{
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ struct ath12k_base *ab = ar->ab;
+ u32 phy_id, freq_low, freq_high;
struct ath12k_hw *ah = ar->ah;
void *channels;
- u32 phy_id;
BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) +
ARRAY_SIZE(ath12k_5ghz_channels) +
ARRAY_SIZE(ath12k_6ghz_channels)) !=
ATH12K_NUM_CHANS);
- reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+ reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
- if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
channels = kmemdup(ath12k_2ghz_channels,
sizeof(ath12k_2ghz_channels),
GFP_KERNEL);
@@ -10683,17 +11444,25 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->bitrates = ath12k_g_rates;
bands[NL80211_BAND_2GHZ] = band;
- if (ar->ab->hw_params->single_pdev_only) {
- phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ if (ab->hw_params->single_pdev_only) {
+ phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2GHZ_CAP);
+ reg_cap = &ab->hal_reg_cap[phy_id];
}
+
+ freq_low = max(reg_cap->low_2ghz_chan,
+ ab->reg_freq_2ghz.start_freq);
+ freq_high = min(reg_cap->high_2ghz_chan,
+ ab->reg_freq_2ghz.end_freq);
+
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_2ghz_chan,
reg_cap->high_2ghz_chan);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
}
- if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
- if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
+ if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6GHZ_FREQ) {
channels = kmemdup(ath12k_6ghz_channels,
sizeof(ath12k_6ghz_channels), GFP_KERNEL);
if (!channels) {
@@ -10709,13 +11478,21 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
bands[NL80211_BAND_6GHZ] = band;
+
+ freq_low = max(reg_cap->low_5ghz_chan,
+ ab->reg_freq_6ghz.start_freq);
+ freq_high = min(reg_cap->high_5ghz_chan,
+ ab->reg_freq_6ghz.end_freq);
+
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
ah->use_6ghz_regd = true;
}
- if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
+ if (reg_cap->low_5ghz_chan < ATH12K_MIN_6GHZ_FREQ) {
channels = kmemdup(ath12k_5ghz_channels,
sizeof(ath12k_5ghz_channels),
GFP_KERNEL);
@@ -10733,14 +11510,21 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->bitrates = ath12k_a_rates;
bands[NL80211_BAND_5GHZ] = band;
- if (ar->ab->hw_params->single_pdev_only) {
- phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ if (ab->hw_params->single_pdev_only) {
+ phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5GHZ_CAP);
+ reg_cap = &ab->hal_reg_cap[phy_id];
}
+ freq_low = max(reg_cap->low_5ghz_chan,
+ ab->reg_freq_5ghz.start_freq);
+ freq_high = min(reg_cap->high_5ghz_chan,
+ ab->reg_freq_5ghz.end_freq);
+
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
}
}
@@ -10840,13 +11624,18 @@ ath12k_mac_setup_radio_iface_comb(struct ath12k *ar,
comb[0].limits = limits;
comb[0].n_limits = n_limits;
comb[0].max_interfaces = max_interfaces;
- comb[0].num_different_channels = 1;
comb[0].beacon_int_infra_match = true;
comb[0].beacon_int_min_gcd = 100;
- comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
+
+ if (ar->ab->hw_params->single_pdev_only) {
+ comb[0].num_different_channels = 2;
+ } else {
+ comb[0].num_different_channels = 1;
+ comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+ }
return 0;
}
@@ -11071,6 +11860,7 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
for_each_ar(ah, ar, i) {
cancel_work_sync(&ar->regd_update_work);
ath12k_debugfs_unregister(ar);
+ ath12k_fw_stats_reset(ar);
}
ieee80211_unregister_hw(hw);
@@ -11219,6 +12009,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, QUEUE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
+ ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR);
if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
@@ -11347,11 +12138,22 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
goto err_unregister_hw;
}
+ if (ar->ab->hw_params->current_cc_support && ab->new_alpha2[0]) {
+ struct wmi_set_current_country_arg current_cc = {};
+
+ memcpy(&current_cc.alpha2, ab->new_alpha2, 2);
+ memcpy(&ar->alpha2, ab->new_alpha2, 2);
+ ret = ath12k_wmi_send_set_current_country_cmd(ar, &current_cc);
+ if (ret)
+ ath12k_warn(ar->ab,
+					    "failed to set cc code for mac register: %d\n",
+ ret);
+ }
+
+ ath12k_fw_stats_init(ar);
ath12k_debugfs_register(ar);
}
- init_completion(&ar->fw_stats_complete);
-
return 0;
err_unregister_hw:
@@ -11396,6 +12198,7 @@ static void ath12k_mac_setup(struct ath12k *ar)
ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
ar->scan.arvif = NULL;
+ ar->vdev_id_11d_scan = ATH12K_11D_INVALID_VDEV_ID;
spin_lock_init(&ar->data_lock);
INIT_LIST_HEAD(&ar->arvifs);
@@ -11411,6 +12214,7 @@ static void ath12k_mac_setup(struct ath12k *ar)
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->mlo_setup_done);
+ init_completion(&ar->completed_11d_scan);
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work);
@@ -11418,6 +12222,10 @@ static void ath12k_mac_setup(struct ath12k *ar)
wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ ar->monitor_started = false;
}
static int __ath12k_mac_mlo_setup(struct ath12k *ar)
@@ -11572,7 +12380,6 @@ void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag)
int ath12k_mac_register(struct ath12k_hw_group *ag)
{
- struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
int ret;
@@ -11585,8 +12392,6 @@ int ath12k_mac_register(struct ath12k_hw_group *ag)
goto err;
}
- set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
-
return 0;
err:
@@ -11603,12 +12408,9 @@ err:
void ath12k_mac_unregister(struct ath12k_hw_group *ag)
{
- struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
- clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
-
for (i = ag->num_hw - 1; i >= 0; i--) {
ah = ath12k_ag_to_ah(ag, i);
if (!ah)
@@ -11722,6 +12524,7 @@ int ath12k_mac_allocate(struct ath12k_hw_group *ag)
if (!ab)
continue;
+ ath12k_debugfs_pdev_create(ab);
ath12k_mac_set_device_defaults(ab);
total_radio += ab->num_radios;
}
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index ae35b73312bf..e6e74b45bfa4 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -33,6 +33,9 @@ struct ath12k_generic_iter {
#define ATH12K_KEEPALIVE_MAX_IDLE 3895
#define ATH12K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+#define ATH12K_PDEV_TX_POWER_INVALID ((u32)-1)
+#define ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS 5000 /* msecs */
+
/* FIXME: should these be in ieee80211.h? */
#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
@@ -64,8 +67,55 @@ struct ath12k_mac_get_any_chanctx_conf_arg {
struct ieee80211_chanctx_conf *chanctx_conf;
};
+/**
+ * struct ath12k_chan_power_info - TPE containing power info per channel chunk
+ * @chan_cfreq: channel center freq (MHz)
+ * e.g.
+ * channel 37/20 MHz, it is 6135
+ * channel 37/40 MHz, it is 6125
+ * channel 37/80 MHz, it is 6145
+ * channel 37/160 MHz, it is 6185
+ * @tx_power: transmit power (dBm)
+ */
+struct ath12k_chan_power_info {
+ u16 chan_cfreq;
+ s8 tx_power;
+};
+
+/* ath12k only deals with 320 MHz, so 16 subchannels */
+#define ATH12K_NUM_PWR_LEVELS 16
+
+/**
+ * struct ath12k_reg_tpc_power_info - regulatory TPC power info
+ * @is_psd_power: is PSD power or not
+ * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD
+ * @ap_power_type: type of power (SP/LPI/VLP)
+ * @num_pwr_levels: number of power levels
+ * @reg_max: Array of maximum TX power (dBm) per PSD value
+ * @ap_constraint_power: AP constraint power (dBm)
+ * @tpe: TPE values processed from TPE IE
+ * @chan_power_info: power info to send to firmware
+ */
+struct ath12k_reg_tpc_power_info {
+ bool is_psd_power;
+ u8 eirp_power;
+ enum wmi_reg_6g_ap_type ap_power_type;
+ u8 num_pwr_levels;
+ u8 reg_max[ATH12K_NUM_PWR_LEVELS];
+ u8 ap_constraint_power;
+ s8 tpe[ATH12K_NUM_PWR_LEVELS];
+ struct ath12k_chan_power_info chan_power_info[ATH12K_NUM_PWR_LEVELS];
+};
+
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
+#define ATH12K_SCAN_11D_INTERVAL 600000
+#define ATH12K_11D_INVALID_VDEV_ID 0xFFFF
+
+void ath12k_mac_11d_scan_start(struct ath12k *ar, u32 vdev_id);
+void ath12k_mac_11d_scan_stop(struct ath12k *ar);
+void ath12k_mac_11d_scan_stop_all(struct ath12k_base *ab);
+
void ath12k_mac_destroy(struct ath12k_hw_group *ag);
void ath12k_mac_unregister(struct ath12k_hw_group *ag);
int ath12k_mac_register(struct ath12k_hw_group *ag);
@@ -115,4 +165,10 @@ struct ieee80211_bss_conf *ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *
struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 link_id);
+int ath12k_mac_get_fw_stats(struct ath12k *ar, struct ath12k_fw_stats_req_params *param);
+void ath12k_mac_update_freq_range(struct ath12k *ar,
+ u32 freq_low, u32 freq_high);
+void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index 2f6d14382ed7..08f44baf182a 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -285,8 +285,11 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
break;
}
- if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
+ if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
+ set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
queue_work(ab->workqueue_aux, &ab->reset_work);
+ }
break;
default:
break;
@@ -379,7 +382,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
mhi_ctrl->iova_start = 0;
- mhi_ctrl->iova_stop = 0xffffffff;
+ mhi_ctrl->iova_stop = ab_pci->dma_mask;
mhi_ctrl->sbl_size = SZ_512K;
mhi_ctrl->seg_len = SZ_512K;
mhi_ctrl->fbc_download = true;
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index b474696ac6d8..489d546390fc 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -17,7 +17,7 @@
#include "debug.h"
#define ATH12K_PCI_BAR_NUM 0
-#define ATH12K_PCI_DMA_MASK 32
+#define ATH12K_PCI_DMA_MASK 36
#define ATH12K_PCI_IRQ_CE0_OFFSET 3
@@ -600,7 +600,8 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
ab->hw_params->ring_mask->rx_wbm_rel[i] ||
ab->hw_params->ring_mask->reo_status[i] ||
ab->hw_params->ring_mask->host2rxdma[i] ||
- ab->hw_params->ring_mask->rx_mon_dest[i]) {
+ ab->hw_params->ring_mask->rx_mon_dest[i] ||
+ ab->hw_params->ring_mask->rx_mon_status[i]) {
num_irq = 1;
}
@@ -718,7 +719,7 @@ static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
- if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) {
+ if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MULTI_QRTR_ID)) {
ab_pci->qmi_instance =
u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
u32_encode_bits(bus->number, BUS_NUMBER_MASK);
@@ -877,13 +878,9 @@ static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
goto disable_device;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
- if (ret) {
- ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
- ATH12K_PCI_DMA_MASK, ret);
- goto release_region;
- }
+ ab_pci->dma_mask = DMA_BIT_MASK(ATH12K_PCI_DMA_MASK);
+ dma_set_mask(&pdev->dev, ab_pci->dma_mask);
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
pci_set_master(pdev);
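
The replacement above splits the two masks: streaming DMA may address 36 bits
(ATH12K_PCI_DMA_MASK, also used for the mhi iova_stop in the mhi.c hunk
above) while coherent allocations stay within 32 bits. The conventional
kernel form keeps the error checks; a sketch under the same masks, reusing
the error path from the removed code:

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
	if (ret) {
		ath12k_err(ab, "failed to set 36-bit streaming DMA mask: %d\n",
			   ret);
		goto release_region;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		ath12k_err(ab, "failed to set 32-bit coherent DMA mask: %d\n",
			   ret);
		goto release_region;
	}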
@@ -1472,7 +1469,7 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
ath12k_pci_msi_enable(ab_pci);
- if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
+ if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MULTI_QRTR_ID))
ath12k_pci_update_qrtr_node_id(ab);
ret = ath12k_mhi_start(ab_pci);
@@ -1491,6 +1488,9 @@ void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
+ return;
+
/* restore aspm in case firmware bootup fails */
ath12k_pci_aspm_restore(ab_pci);
@@ -1710,12 +1710,12 @@ err_hal_srng_deinit:
err_mhi_unregister:
ath12k_mhi_unregister(ab_pci);
-err_pci_msi_free:
- ath12k_pci_msi_free(ab_pci);
-
err_irq_affinity_cleanup:
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
+err_pci_msi_free:
+ ath12k_pci_msi_free(ab_pci);
+
err_pci_free_region:
ath12k_pci_free_region(ab_pci);
@@ -1734,8 +1734,6 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab, false);
- ath12k_qmi_deinit_service(ab);
- ath12k_core_hw_group_unassign(ab);
goto qmi_fail;
}
@@ -1743,9 +1741,10 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
- ath12k_core_deinit(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
qmi_fail:
+ ath12k_core_deinit(ab);
ath12k_fw_unmap(ab);
ath12k_mhi_unregister(ab_pci);
@@ -1758,13 +1757,34 @@ qmi_fail:
ath12k_core_free(ab);
}
+static void ath12k_pci_hw_group_power_down(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_pci_power_down(ab, false);
+ }
+
+ mutex_unlock(&ag->mutex);
+}
+
static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
struct ath12k_base *ab = pci_get_drvdata(pdev);
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath12k_pci_power_down(ab, false);
+ ath12k_pci_hw_group_power_down(ab->ag);
}
static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
@@ -1831,7 +1851,7 @@ static struct pci_driver ath12k_pci_driver = {
.driver.pm = &ath12k_pci_pm_ops,
};
-static int ath12k_pci_init(void)
+int ath12k_pci_init(void)
{
int ret;
@@ -1844,14 +1864,8 @@ static int ath12k_pci_init(void)
return 0;
}
-module_init(ath12k_pci_init);
-static void ath12k_pci_exit(void)
+void ath12k_pci_exit(void)
{
pci_unregister_driver(&ath12k_pci_driver);
}
-
-module_exit(ath12k_pci_exit);
-
-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index 31584a7ad80e..0b4c459d6d8e 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
@@ -116,6 +116,7 @@ struct ath12k_pci {
unsigned long irq_flags;
const struct ath12k_pci_ops *pci_ops;
u32 qmi_instance;
+ u64 dma_mask;
};
static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
@@ -145,4 +146,6 @@ void ath12k_pci_stop(struct ath12k_base *ab);
int ath12k_pci_start(struct ath12k_base *ab);
int ath12k_pci_power_up(struct ath12k_base *ab);
void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend);
+int ath12k_pci_init(void);
+void ath12k_pci_exit(void);
#endif /* ATH12K_PCI_H */
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
index 792cca8a3fb1..ec7236bbccc0 100644
--- a/drivers/net/wireless/ath/ath12k/peer.c
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -383,6 +383,9 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
arvif->ast_idx = peer->hw_peer_id;
}
+ if (vif->type == NL80211_IFTYPE_AP)
+ peer->ucast_ra_only = true;
+
if (sta) {
ahsta = ath12k_sta_to_ahsta(sta);
arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index 5870ee11a8c7..f3a5e054d2b5 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PEER_H
@@ -62,6 +62,7 @@ struct ath12k_peer {
/* for reference to ath12k_link_sta */
u8 link_id;
+ bool ucast_ra_only;
};
struct ath12k_ml_peer {
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 348dbc81bad8..99e1fb2910d0 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -11,6 +11,8 @@
#include "debug.h"
#include <linux/of.h>
#include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04
@@ -2168,10 +2170,12 @@ int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
req.bdf_support_valid = 1;
req.bdf_support = 1;
- req.m3_support_valid = 1;
- req.m3_support = 1;
- req.m3_cache_support_valid = 1;
- req.m3_cache_support = 1;
+ if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_driver) {
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+ }
req.cal_done_valid = 1;
req.cal_done = ab->qmi.cal_done;
@@ -2264,6 +2268,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
+ if (resp.single_chip_mlo_support_valid && resp.single_chip_mlo_support)
+ ab->single_chip_mlo_support = true;
+
if (!resp.num_phy_valid) {
ret = -ENODATA;
goto out;
@@ -2272,7 +2279,8 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
ab->qmi.num_radios = resp.num_phy;
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ "phy capability resp valid %d single_chip_mlo_support %d valid %d num_phy %d valid %d board_id %d\n",
+ resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support,
resp.num_phy_valid, resp.num_phy,
resp.board_id_valid, resp.board_id);
@@ -2376,7 +2384,8 @@ int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
* failure to firmware, and firmware then requests multiple blocks of
* small chunk size memory.
*/
- if (ab->qmi.target_mem_delayed) {
+ if (!test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
+ ab->qmi.target_mem_delayed) {
delayed = true;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
@@ -2434,12 +2443,35 @@ out:
return ret;
}
+void ath12k_qmi_reset_mlo_mem(struct ath12k_hw_group *ag)
+{
+ struct target_mem_chunk *mlo_chunk;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (!ag->mlo_mem.init_done || ag->num_started)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ag->mlo_mem.chunk); i++) {
+ mlo_chunk = &ag->mlo_mem.chunk[i];
+
+ if (mlo_chunk->v.addr)
+ /* TODO: Mode 0 recovery is the default mode, hence resetting the
+ * whole memory region for now. Once Mode 1 support is added, this
+ * needs to be handled properly
+ */
+ memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
+ }
+}
+
static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
struct target_mem_chunk *chunk,
int idx)
{
struct ath12k_hw_group *ag = ab->ag;
struct target_mem_chunk *mlo_chunk;
+ bool fixed_mem;
lockdep_assert_held(&ag->mutex);
@@ -2451,8 +2483,13 @@ static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
return;
}
+ fixed_mem = test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
mlo_chunk = &ag->mlo_mem.chunk[idx];
- if (mlo_chunk->v.addr) {
+
+ if (fixed_mem && mlo_chunk->v.ioaddr) {
+ iounmap(mlo_chunk->v.ioaddr);
+ mlo_chunk->v.ioaddr = NULL;
+ } else if (mlo_chunk->v.addr) {
dma_free_coherent(ab->dev,
mlo_chunk->size,
mlo_chunk->v.addr,
@@ -2462,7 +2499,10 @@ static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
mlo_chunk->paddr = 0;
mlo_chunk->size = 0;
- chunk->v.addr = NULL;
+ if (fixed_mem)
+ chunk->v.ioaddr = NULL;
+ else
+ chunk->v.addr = NULL;
chunk->paddr = 0;
chunk->size = 0;
}
@@ -2473,19 +2513,24 @@ static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
int i, mlo_idx;
for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
- if (!ab->qmi.target_mem[i].v.addr)
- continue;
-
if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
ath12k_qmi_free_mlo_mem_chunk(ab,
&ab->qmi.target_mem[i],
mlo_idx++);
} else {
- dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].prev_size,
- ab->qmi.target_mem[i].v.addr,
- ab->qmi.target_mem[i].paddr);
- ab->qmi.target_mem[i].v.addr = NULL;
+ if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
+ ab->qmi.target_mem[i].v.ioaddr) {
+ iounmap(ab->qmi.target_mem[i].v.ioaddr);
+ ab->qmi.target_mem[i].v.ioaddr = NULL;
+ } else {
+ if (!ab->qmi.target_mem[i].v.addr)
+ continue;
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].prev_size,
+ ab->qmi.target_mem[i].v.addr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].v.addr = NULL;
+ }
}
}
@@ -2638,6 +2683,130 @@ err:
return ret;
}
+static int ath12k_qmi_assign_target_mem_chunk(struct ath12k_base *ab)
+{
+ struct reserved_mem *rmem;
+ size_t avail_rmem_size;
+ int i, idx, ret;
+
+ for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+ switch (ab->qmi.target_mem[i].type) {
+ case HOST_DDR_REGION_TYPE:
+ rmem = ath12k_core_get_reserved_mem(ab, 0);
+ if (!rmem) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ avail_rmem_size = rmem->size;
+ if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "failed to assign mem type %u req size %u avail size %zu\n",
+ ab->qmi.target_mem[i].type,
+ ab->qmi.target_mem[i].size,
+ avail_rmem_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ab->qmi.target_mem[idx].paddr = rmem->base;
+ ab->qmi.target_mem[idx].v.ioaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].v.ioaddr) {
+ ret = -EIO;
+ goto out;
+ }
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ rmem = ath12k_core_get_reserved_mem(ab, 0);
+ if (!rmem) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ avail_rmem_size = rmem->size - ab->hw_params->bdf_addr_offset;
+ if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "failed to assign mem type %u req size %u avail size %zu\n",
+ ab->qmi.target_mem[i].type,
+ ab->qmi.target_mem[i].size,
+ avail_rmem_size);
+ ret = -EINVAL;
+ goto out;
+ }
+ ab->qmi.target_mem[idx].paddr =
+ rmem->base + ab->hw_params->bdf_addr_offset;
+ ab->qmi.target_mem[idx].v.ioaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].v.ioaddr) {
+ ret = -EIO;
+ goto out;
+ }
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case CALDB_MEM_REGION_TYPE:
+ /* Cold boot calibration is not enabled in ath12k. Hence,
+ * assign paddr = 0.
+ * Once cold boot calibration is enabled, add support to
+ * assign reserved memory from the DT.
+ */
+ ab->qmi.target_mem[idx].paddr = 0;
+ ab->qmi.target_mem[idx].v.ioaddr = NULL;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case M3_DUMP_REGION_TYPE:
+ rmem = ath12k_core_get_reserved_mem(ab, 1);
+ if (!rmem) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ avail_rmem_size = rmem->size;
+ if (avail_rmem_size < ab->qmi.target_mem[i].size) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "failed to assign mem type %u req size %u avail size %zu\n",
+ ab->qmi.target_mem[i].type,
+ ab->qmi.target_mem[i].size,
+ avail_rmem_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ab->qmi.target_mem[idx].paddr = rmem->base;
+ ab->qmi.target_mem[idx].v.ioaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].v.ioaddr) {
+ ret = -EIO;
+ goto out;
+ }
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ default:
+ ath12k_warn(ab, "qmi ignore invalid mem req type %u\n",
+ ab->qmi.target_mem[i].type);
+ break;
+ }
+ }
+ ab->qmi.mem_seg_count = idx;
+
+ return 0;
+out:
+ ath12k_qmi_free_target_mem_chunk(ab);
+ return ret;
+}
+
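
ath12k_qmi_assign_target_mem_chunk() depends on ath12k_core_get_reserved_mem(ab, idx), introduced elsewhere in this series, returning the idx-th reserved-memory region attached to the device. A plausible sketch of such a lookup under the standard memory-region binding; the helper body below is an assumption, not this patch's implementation:

	/* Sketch under assumptions: resolve the idx-th "memory-region"
	 * phandle on the device node to a struct reserved_mem.
	 */
	#include <linux/of.h>
	#include <linux/of_reserved_mem.h>

	static struct reserved_mem *sketch_get_reserved_mem(struct ath12k_base *ab,
							    int idx)
	{
		struct device_node *np;
		struct reserved_mem *rmem;

		np = of_parse_phandle(ab->dev->of_node, "memory-region", idx);
		if (!np)
			return NULL;

		rmem = of_reserved_mem_lookup(np);
		of_node_put(np);

		return rmem;	/* NULL when the region cannot be resolved */
	}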
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
@@ -2939,6 +3108,9 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_remoteproc)
+ return;
+
if (!m3_mem->vaddr)
return;
@@ -3019,15 +3191,16 @@ int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
struct qmi_txn txn;
int ret = 0;
- ret = ath12k_qmi_m3_load(ab);
- if (ret) {
- ath12k_err(ab, "failed to load m3 firmware: %d", ret);
- return ret;
+ if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_driver) {
+ ret = ath12k_qmi_m3_load(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to load m3 firmware: %d", ret);
+ return ret;
+ }
+ req.addr = m3_mem->paddr;
+ req.size = m3_mem->size;
}
- req.addr = m3_mem->paddr;
- req.size = m3_mem->size;
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -3477,11 +3650,20 @@ static void ath12k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
- ret = ath12k_qmi_alloc_target_mem_chunk(ab);
- if (ret) {
- ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
- ret);
- return;
+ if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags)) {
+ ret = ath12k_qmi_assign_target_mem_chunk(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to assign qmi target memory: %d\n",
+ ret);
+ return;
+ }
+ } else {
+ ret = ath12k_qmi_alloc_target_mem_chunk(ab);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
+ ret);
+ return;
+ }
}
ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 45d7c3fcafdd..96e6c3daecfe 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_QMI_H
@@ -21,6 +21,7 @@
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850 0x1
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274 0x07
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ5332 0x2
#define ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH12K_QMI_RESP_LEN_MAX 8192
#define ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
@@ -41,6 +42,7 @@
#define ATH12K_BOARD_ID_DEFAULT 0xFF
struct ath12k_base;
+struct ath12k_hw_group;
enum ath12k_qmi_file_type {
ATH12K_QMI_FILE_TYPE_BDF_GOLDEN = 0,
@@ -621,5 +623,6 @@ void ath12k_qmi_deinit_service(struct ath12k_base *ab);
int ath12k_qmi_init_service(struct ath12k_base *ab);
void ath12k_qmi_free_resource(struct ath12k_base *ab);
void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
+void ath12k_qmi_reset_mlo_mem(struct ath12k_hw_group *ag);
#endif
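
The newly exported ath12k_qmi_reset_mlo_mem() clears the group-shared MLO memory, but only when the group mutex is held, MLO memory was initialized, and no device in the group is still started. A hedged sketch of the expected call site; the caller name is hypothetical:

	/* Sketch only: plausible call site during a hardware-group restart. */
	static void sketch_hw_group_restart(struct ath12k_hw_group *ag)
	{
		mutex_lock(&ag->mutex);
		/* ... stop every device in the group so ag->num_started drops to 0 ... */
		ath12k_qmi_reset_mlo_mem(ag);	/* no-op unless init_done && !num_started */
		mutex_unlock(&ag->mutex);
	}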
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index 439d61f284d8..2598b39d5d7e 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
#include "debug.h"
+#include "mac.h"
/* World regdom to be used in case default regd from fw is unavailable */
#define ATH12K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
@@ -48,6 +49,7 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
+ struct wmi_set_current_country_arg current_arg = {};
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
int ret, i;
@@ -55,6 +57,24 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "driver initiated regd update\n");
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return;
+
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_reg_update_chan_list(ar, true);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to update chan list for pdev %u, ret %d\n",
+ i, ret);
+ break;
+ }
+ }
+ return;
+ }
+
/* Currently supporting only General User Hints. Cell-based user
* hints are to be handled later.
* Hints from other sources like Core, Beacons are not expected for
@@ -77,27 +97,38 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
return;
}
- /* Set the country code to the firmware and wait for
- * the WMI_REG_CHAN_LIST_CC EVENT for updating the
- * reg info
- */
- arg.flags = ALPHA_IS_SET;
- memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
- arg.cc_info.alpha2[2] = 0;
-
/* Allow fresh updates to wiphy regd */
ah->regd_updated = false;
/* Send the reg change request to all the radios */
for_each_ar(ah, ar, i) {
- ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
- if (ret)
- ath12k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ if (ar->ab->hw_params->current_cc_support) {
+ memcpy(&current_arg.alpha2, request->alpha2, 2);
+ memcpy(&ar->alpha2, &current_arg.alpha2, 2);
+ ret = ath12k_wmi_send_set_current_country_cmd(ar, &current_arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed set current country code: %d\n", ret);
+ } else {
+ arg.flags = ALPHA_IS_SET;
+ memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
+ arg.cc_info.alpha2[2] = 0;
+
+ ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed set INIT Country code: %d\n", ret);
+ }
+
+ wiphy_lock(wiphy);
+ ath12k_mac_11d_scan_stop(ar);
+ wiphy_unlock(wiphy);
+
+ ar->regdom_set_by_user = true;
}
}
-int ath12k_reg_update_chan_list(struct ath12k *ar)
+int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait)
{
struct ieee80211_supported_band **bands;
struct ath12k_wmi_scan_chan_list_arg *arg;
@@ -106,7 +137,35 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
struct ath12k_wmi_channel_arg *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret;
+ int i, ret, left;
+
+ if (wait && ar->state_11d == ATH12K_11D_RUNNING) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH12K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH12K_11D_IDLE;
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if (wait &&
+ (ar->scan.state == ATH12K_SCAN_STARTING ||
+ ar->scan.state == ATH12K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH12K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
+
+ if (ar->ah->state == ATH12K_HW_STATE_RESTARTING)
+ return 0;
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
@@ -206,15 +265,57 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ u32 phy_id, freq_low, freq_high, supported_bands;
struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
struct ieee80211_hw *hw = ah->hw;
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
- int i;
ab = ar->ab;
+ supported_bands = ar->pdev->cap.supported_bands;
+ reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
+
+ /* A reg change may have altered the limits of the supported
+ * frequency range. Update them: as a first step, reset the
+ * previous values, then compute and set the new ones.
+ */
+ ar->freq_range.start_freq = 0;
+ ar->freq_range.end_freq = 0;
+
+ if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
+ if (ab->hw_params->single_pdev_only) {
+ phy_id = ar->pdev->cap.band[WMI_HOST_WLAN_2GHZ_CAP].phy_id;
+ reg_cap = &ab->hal_reg_cap[phy_id];
+ }
+
+ freq_low = max(reg_cap->low_2ghz_chan, ab->reg_freq_2ghz.start_freq);
+ freq_high = min(reg_cap->high_2ghz_chan, ab->reg_freq_2ghz.end_freq);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) {
+ if (ab->hw_params->single_pdev_only) {
+ phy_id = ar->pdev->cap.band[WMI_HOST_WLAN_5GHZ_CAP].phy_id;
+ reg_cap = &ab->hal_reg_cap[phy_id];
+ }
+
+ freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_5ghz.start_freq);
+ freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_5ghz.end_freq);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) {
+ freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_6ghz.start_freq);
+ freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_6ghz.end_freq);
+
+ ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+ }
+
/* If one of the radios within ah has already updated the regd for
* the wiphy, then avoid setting regd again
*/
@@ -275,11 +376,7 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
goto err;
}
- rtnl_lock();
- wiphy_lock(hw->wiphy);
- ret = regulatory_set_wiphy_regd_sync(hw->wiphy, regd_copy);
- wiphy_unlock(hw->wiphy);
- rtnl_unlock();
+ ret = regulatory_set_wiphy_regd(hw->wiphy, regd_copy);
kfree(regd_copy);
@@ -290,15 +387,7 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
goto skip;
ah->regd_updated = true;
- /* Apply the new regd to all the radios, this is expected to be received only once
- * since we check for ah->regd_updated and allow here only once.
- */
- for_each_ar(ah, ar, i) {
- ab = ar->ab;
- ret = ath12k_reg_update_chan_list(ar);
- if (ret)
- goto err;
- }
+
skip:
return 0;
err:
@@ -365,129 +454,6 @@ static u32 ath12k_map_fw_phy_flags(u32 phy_flags)
return flags;
}
-static bool
-ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
- struct ieee80211_reg_rule *rule2)
-{
- u32 start_freq1, end_freq1;
- u32 start_freq2, end_freq2;
-
- start_freq1 = rule1->freq_range.start_freq_khz;
- start_freq2 = rule2->freq_range.start_freq_khz;
-
- end_freq1 = rule1->freq_range.end_freq_khz;
- end_freq2 = rule2->freq_range.end_freq_khz;
-
- if ((start_freq1 >= start_freq2 &&
- start_freq1 < end_freq2) ||
- (start_freq2 > start_freq1 &&
- start_freq2 < end_freq1))
- return true;
-
- /* TODO: Should we restrict intersection feasibility
- * based on min bandwidth of the intersected region also,
- * say the intersected rule should have a min bandwidth
- * of 20MHz?
- */
-
- return false;
-}
-
-static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
- struct ieee80211_reg_rule *rule2,
- struct ieee80211_reg_rule *new_rule)
-{
- u32 start_freq1, end_freq1;
- u32 start_freq2, end_freq2;
- u32 freq_diff, max_bw;
-
- start_freq1 = rule1->freq_range.start_freq_khz;
- start_freq2 = rule2->freq_range.start_freq_khz;
-
- end_freq1 = rule1->freq_range.end_freq_khz;
- end_freq2 = rule2->freq_range.end_freq_khz;
-
- new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
- start_freq2);
- new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
-
- freq_diff = new_rule->freq_range.end_freq_khz -
- new_rule->freq_range.start_freq_khz;
- max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
- rule2->freq_range.max_bandwidth_khz);
- new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
-
- new_rule->power_rule.max_antenna_gain =
- min_t(u32, rule1->power_rule.max_antenna_gain,
- rule2->power_rule.max_antenna_gain);
-
- new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
- rule2->power_rule.max_eirp);
-
- /* Use the flags of both the rules */
- new_rule->flags = rule1->flags | rule2->flags;
-
- /* To be safe, lts use the max cac timeout of both rules */
- new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
- rule2->dfs_cac_ms);
-}
-
-static struct ieee80211_regdomain *
-ath12k_regd_intersect(struct ieee80211_regdomain *default_regd,
- struct ieee80211_regdomain *curr_regd)
-{
- u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
- struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
- struct ieee80211_regdomain *new_regd = NULL;
- u8 i, j, k;
-
- num_old_regd_rules = default_regd->n_reg_rules;
- num_curr_regd_rules = curr_regd->n_reg_rules;
- num_new_regd_rules = 0;
-
- /* Find the number of intersecting rules to allocate new regd memory */
- for (i = 0; i < num_old_regd_rules; i++) {
- old_rule = default_regd->reg_rules + i;
- for (j = 0; j < num_curr_regd_rules; j++) {
- curr_rule = curr_regd->reg_rules + j;
-
- if (ath12k_reg_can_intersect(old_rule, curr_rule))
- num_new_regd_rules++;
- }
- }
-
- if (!num_new_regd_rules)
- return NULL;
-
- new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
- sizeof(struct ieee80211_reg_rule)),
- GFP_ATOMIC);
-
- if (!new_regd)
- return NULL;
-
- /* We set the new country and dfs region directly and only trim
- * the freq, power, antenna gain by intersecting with the
- * default regdomain. Also MAX of the dfs cac timeout is selected.
- */
- new_regd->n_reg_rules = num_new_regd_rules;
- memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
- new_regd->dfs_region = curr_regd->dfs_region;
- new_rule = new_regd->reg_rules;
-
- for (i = 0, k = 0; i < num_old_regd_rules; i++) {
- old_rule = default_regd->reg_rules + i;
- for (j = 0; j < num_curr_regd_rules; j++) {
- curr_rule = curr_regd->reg_rules + j;
-
- if (ath12k_reg_can_intersect(old_rule, curr_rule))
- ath12k_reg_intersect_rules(old_rule, curr_rule,
- (new_rule + k++));
- }
- }
- return new_regd;
-}
-
static const char *
ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
{
@@ -524,13 +490,14 @@ ath12k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
static void
ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
- u32 reg_flags)
+ s8 psd, u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->psd = psd;
reg_rule->flags = reg_flags;
}
@@ -552,7 +519,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -574,7 +541,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
@@ -599,7 +566,7 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -611,26 +578,77 @@ ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
*rule_idx = i;
}
+static void ath12k_reg_update_freq_range(struct ath12k_reg_freq *reg_freq,
+ struct ath12k_reg_rule *reg_rule)
+{
+ if (reg_freq->start_freq > reg_rule->start_freq)
+ reg_freq->start_freq = reg_rule->start_freq;
+
+ if (reg_freq->end_freq < reg_rule->end_freq)
+ reg_freq->end_freq = reg_rule->end_freq;
+}
+
+enum wmi_reg_6g_ap_type
+ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
+{
+ switch (power_type) {
+ case IEEE80211_REG_LPI_AP:
+ return WMI_REG_INDOOR_AP;
+ case IEEE80211_REG_SP_AP:
+ return WMI_REG_STD_POWER_AP;
+ case IEEE80211_REG_VLP_AP:
+ return WMI_REG_VLP_AP;
+ default:
+ return WMI_REG_MAX_AP_TYPE;
+ }
+}
+
struct ieee80211_regdomain *
ath12k_reg_build_regd(struct ath12k_base *ab,
- struct ath12k_reg_info *reg_info, bool intersect)
+ struct ath12k_reg_info *reg_info,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
{
- struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
- struct ath12k_reg_rule *reg_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ struct ath12k_reg_rule *reg_rule, *reg_rule_6ghz;
+ u32 flags, reg_6ghz_number, max_bw_6ghz;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
- u32 flags;
char alpha2[3];
num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
- /* FIXME: Currently taking reg rules for 6G only from Indoor AP mode list.
- * This can be updated to choose the combination dynamically based on AP
- * type and client type, after complete 6G regulatory support is added.
- */
- if (reg_info->is_ext_reg_event)
- num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP];
+ if (reg_info->is_ext_reg_event) {
+ if (vdev_type == WMI_VDEV_TYPE_STA) {
+ enum wmi_reg_6g_ap_type ap_type;
+
+ ap_type = ath12k_reg_ap_pwr_convert(power_type);
+ if (ap_type == WMI_REG_MAX_AP_TYPE)
+ ap_type = WMI_REG_INDOOR_AP;
+
+ reg_6ghz_number = reg_info->num_6g_reg_rules_cl
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ if (reg_6ghz_number == 0) {
+ ap_type = WMI_REG_INDOOR_AP;
+ reg_6ghz_number = reg_info->num_6g_reg_rules_cl
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ }
+
+ reg_rule_6ghz = reg_info->reg_rules_6g_client_ptr
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ max_bw_6ghz = reg_info->max_bw_6g_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ } else {
+ reg_6ghz_number = reg_info->num_6g_reg_rules_ap
+ [WMI_REG_INDOOR_AP];
+ reg_rule_6ghz =
+ reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP];
+ max_bw_6ghz = reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP];
+ }
+
+ num_rules += reg_6ghz_number;
+ }
if (!num_rules)
goto ret;
@@ -639,21 +657,31 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI)
num_rules += 2;
- tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ new_regd = kzalloc(sizeof(*new_regd) +
(num_rules * sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
- if (!tmp_regd)
+ if (!new_regd)
goto ret;
- memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(new_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
- tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
+ new_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
ath12k_dbg(ab, ATH12K_DBG_REG,
"\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
- alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region),
+ alpha2, ath12k_reg_get_regdom_str(new_regd->dfs_region),
reg_info->dfs_region, num_rules);
+
+ /* Reset the start and end frequency for each band */
+ ab->reg_freq_5ghz.start_freq = INT_MAX;
+ ab->reg_freq_5ghz.end_freq = 0;
+ ab->reg_freq_2ghz.start_freq = INT_MAX;
+ ab->reg_freq_2ghz.end_freq = 0;
+ ab->reg_freq_6ghz.start_freq = INT_MAX;
+ ab->reg_freq_6ghz.end_freq = 0;
+
/* Update reg_rules[] below. Firmware is expected to
* send these rules in order (2G rules first and then 5G)
*/
@@ -664,6 +692,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_2g);
flags = 0;
+ ath12k_reg_update_freq_range(&ab->reg_freq_2ghz, reg_rule);
} else if (reg_info->num_5g_reg_rules &&
(j < reg_info->num_5g_reg_rules)) {
reg_rule = reg_info->reg_rules_5g_ptr + j++;
@@ -677,13 +706,15 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
- } else if (reg_info->is_ext_reg_event &&
- reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] &&
- (k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) {
- reg_rule = reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP] + k++;
- max_bw = min_t(u16, reg_rule->max_bw,
- reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]);
+ ath12k_reg_update_freq_range(&ab->reg_freq_5ghz, reg_rule);
+ } else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
+ (k < reg_6ghz_number)) {
+ reg_rule = reg_rule_6ghz + k++;
+ max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
flags = NL80211_RRF_AUTO_BW;
+ if (reg_rule->psd_flag)
+ flags |= NL80211_RRF_PSD;
+ ath12k_reg_update_freq_range(&ab->reg_freq_6ghz, reg_rule);
} else {
break;
}
@@ -691,11 +722,11 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
flags |= ath12k_map_fw_reg_flags(reg_rule->flags);
flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap);
- ath12k_reg_update_rule(tmp_regd->reg_rules + i,
+ ath12k_reg_update_rule(new_regd->reg_rules + i,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
@@ -706,7 +737,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
reg_info->dfs_region == ATH12K_DFS_REG_ETSI &&
(reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){
- ath12k_reg_update_weather_radar_band(ab, tmp_regd,
+ ath12k_reg_update_weather_radar_band(ab, new_regd,
reg_rule, &i,
flags, max_bw);
continue;
@@ -716,36 +747,19 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
- tmp_regd->reg_rules[i].dfs_cac_ms,
+ new_regd->reg_rules[i].dfs_cac_ms,
flags, reg_rule->psd_flag, reg_rule->psd_eirp);
} else {
ath12k_dbg(ab, ATH12K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
- tmp_regd->reg_rules[i].dfs_cac_ms,
+ new_regd->reg_rules[i].dfs_cac_ms,
flags);
}
}
- tmp_regd->n_reg_rules = i;
-
- if (intersect) {
- default_regd = ab->default_regd[reg_info->phy_id];
-
- /* Get a new regd by intersecting the received regd with
- * our default regd.
- */
- new_regd = ath12k_regd_intersect(default_regd, tmp_regd);
- kfree(tmp_regd);
- if (!new_regd) {
- ath12k_warn(ab, "Unable to create intersected regdomain\n");
- goto ret;
- }
- } else {
- new_regd = tmp_regd;
- }
-
+ new_regd->n_reg_rules = i;
ret:
return new_regd;
}
@@ -767,9 +781,109 @@ void ath12k_regd_update_work(struct work_struct *work)
}
}
+void ath12k_reg_reset_reg_info(struct ath12k_reg_info *reg_info)
+{
+ u8 i, j;
+
+ if (!reg_info)
+ return;
+
+ kfree(reg_info->reg_rules_2g_ptr);
+ kfree(reg_info->reg_rules_5g_ptr);
+
+ if (reg_info->is_ext_reg_event) {
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ kfree(reg_info->reg_rules_6g_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+ kfree(reg_info->reg_rules_6g_client_ptr[i][j]);
+ }
+ }
+}
+
+enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info)
+{
+ int pdev_idx = reg_info->phy_id;
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* In case of failure to set the requested country,
+ * firmware retains the current regd. We print failure info
+ * and return from here.
+ */
+ ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ return ATH12K_REG_STATUS_DROP;
+ }
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback.
+ */
+ if (ab->hw_params->single_pdev_only &&
+ pdev_idx < ab->hw_params->num_rxdma_per_pdev)
+ return ATH12K_REG_STATUS_DROP;
+ else
+ return ATH12K_REG_STATUS_FALLBACK;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp(ab->default_regd[pdev_idx]->alpha2,
+ reg_info->alpha2, 2))
+ return ATH12K_REG_STATUS_DROP;
+
+ return ATH12K_REG_STATUS_VALID;
+}
+
+int ath12k_reg_handle_chan_list(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *regd = NULL;
+ int pdev_idx = reg_info->phy_id;
+ struct ath12k *ar;
+
+ regd = ath12k_reg_build_regd(ab, reg_info, vdev_type, power_type);
+ if (!regd)
+ return -EINVAL;
+
+ spin_lock_bh(&ab->base_lock);
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ /* Once mac is registered, ar is valid and all CC events from
+ * firmware are currently considered to be received due to
+ * user requests.
+ * Free previously built regd before assigning the newly
+ * generated regd to ar. NULL pointer handling will be
+ * taken care of by kfree itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+ /* Multiple events for the same *ar are not expected. But we
+ * can still clear any previously stored default_regd if we
+ * are receiving this event for the same radio by mistake.
+ * NULL pointer handling will be taken care of by kfree itself.
+ */
+ kfree(ab->default_regd[pdev_idx]);
+ /* This regd would be applied during mac registration */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+}
+
void ath12k_reg_init(struct ieee80211_hw *hw)
{
hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
hw->wiphy->reg_notifier = ath12k_reg_notifier;
}
@@ -777,8 +891,18 @@ void ath12k_reg_free(struct ath12k_base *ab)
{
int i;
+ mutex_lock(&ab->core_lock);
+ for (i = 0; i < MAX_RADIOS; i++) {
+ ath12k_reg_reset_reg_info(ab->reg_info[i]);
+ kfree(ab->reg_info[i]);
+ ab->reg_info[i] = NULL;
+ }
+
for (i = 0; i < ab->hw_params->max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
+ ab->default_regd[i] = NULL;
+ ab->new_regd[i] = NULL;
}
+ mutex_unlock(&ab->core_lock);
}
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
index 75f80df2aa0c..8af8e9ba462e 100644
--- a/drivers/net/wireless/ath/ath12k/reg.h
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -92,13 +92,29 @@ enum ath12k_reg_phy_bitmap {
ATH12K_REG_PHY_BITMAP_NO11BE = BIT(6),
};
+enum ath12k_reg_status {
+ ATH12K_REG_STATUS_VALID,
+ ATH12K_REG_STATUS_DROP,
+ ATH12K_REG_STATUS_FALLBACK,
+};
+
void ath12k_reg_init(struct ieee80211_hw *hw);
void ath12k_reg_free(struct ath12k_base *ab);
void ath12k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab,
struct ath12k_reg_info *reg_info,
- bool intersect);
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
int ath12k_regd_update(struct ath12k *ar, bool init);
-int ath12k_reg_update_chan_list(struct ath12k *ar);
+int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait);
+void ath12k_reg_reset_reg_info(struct ath12k_reg_info *reg_info);
+int ath12k_reg_handle_chan_list(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
+enum wmi_reg_6g_ap_type
+ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
+enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/testmode.c b/drivers/net/wireless/ath/ath12k/testmode.c
index 18d56a976dc7..fb6af7ccf71f 100644
--- a/drivers/net/wireless/ath/ath12k/testmode.c
+++ b/drivers/net/wireless/ath/ath12k/testmode.c
@@ -97,7 +97,7 @@ void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
u8 const *buf_pos;
ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+ "testmode event wmi cmd_id %d ftm event msg %p datalen %d\n",
cmd_id, ftm_msg, length);
ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(le32_to_cpu(ftm_msg->seg_hdr.pdev_id));
@@ -227,7 +227,7 @@ static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[])
buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
- "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
cmd_id, buf, buf_len);
ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
bufpos = buf;
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 6d1ea5f3a791..60e2444fe08c 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -29,6 +29,7 @@ struct ath12k_wmi_svc_ready_parse {
struct wmi_tlv_fw_stats_parse {
const struct wmi_stats_event *ev;
+ struct ath12k_fw_stats *stats;
};
struct ath12k_wmi_dma_ring_caps_parse {
@@ -177,6 +178,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
[WMI_TAG_P2P_NOA_EVENT] = {
.min_len = sizeof(struct wmi_p2p_noa_event) },
+ [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
+ .min_len = sizeof(struct wmi_11d_new_cc_event) },
};
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -520,10 +523,10 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
* band to band for a single radio, need to see how this should be
* handled.
*/
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
- } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+ } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
@@ -546,7 +549,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
pdev_cap->rx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
@@ -566,7 +569,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
}
- if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
cap_band->max_bw_supported =
@@ -1037,14 +1040,32 @@ int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
struct wmi_vdev_start_req_arg *arg)
{
+ u32 center_freq1 = arg->band_center_freq1;
+
memset(chan, 0, sizeof(*chan));
chan->mhz = cpu_to_le32(arg->freq);
- chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
- if (arg->mode == MODE_11AC_VHT80_80)
+ chan->band_center_freq1 = cpu_to_le32(center_freq1);
+ if (arg->mode == MODE_11BE_EHT320) {
+ if (arg->freq > center_freq1)
+ chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
+ else
+ chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);
+
+ chan->band_center_freq2 = cpu_to_le32(center_freq1);
+
+ } else if (arg->mode == MODE_11BE_EHT160) {
+ if (arg->freq > center_freq1)
+ chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
+ else
+ chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
+
+ chan->band_center_freq2 = cpu_to_le32(center_freq1);
+ } else if (arg->mode == MODE_11BE_EHT80_80) {
chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
- else
+ } else {
chan->band_center_freq2 = 0;
+ }
chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
if (arg->passive)
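
The EHT160/EHT320 branch above re-encodes the channel for firmware: band_center_freq1 is moved to the center of the 80/160 MHz segment that contains the primary channel, while band_center_freq2 takes the overall center. A worked example with illustrative frequencies:

	/* Illustrative numbers only. Take a 320 MHz channel centered at
	 * 6105 MHz (spanning 5945-6265 MHz) with the primary channel at
	 * 6135 MHz:
	 *
	 *   arg->freq (6135) > center_freq1 (6105), so the primary lies in
	 *   the upper 160 MHz half (6105-6265 MHz, centered at 6185 MHz):
	 *
	 *     band_center_freq1 = 6105 + 80 = 6185;  // segment center
	 *     band_center_freq2 = 6105;              // overall 320 MHz center
	 *
	 * EHT160 follows the same scheme with a +/-40 MHz offset selecting
	 * the 80 MHz segment that contains the primary channel.
	 */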
@@ -2166,9 +2187,10 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
- u32 peer_legacy_rates_align;
- u32 peer_ht_rates_align;
+ u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
+ u32 peer_ht_rates_align, eml_trans_timeout;
int i, ret, len;
+ u16 eml_cap;
__le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
@@ -2340,6 +2362,24 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
+
+ eml_cap = arg->ml.eml_cap;
+ if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
+ /* Padding delay */
+ eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
+ ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
+ /* Transition delay */
+ eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
+ ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
+ /* Transition timeout */
+ eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
+ ml_params->emlsr_trans_timeout_us =
+ cpu_to_le32(eml_trans_timeout);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u",
+ arg->peer_mac, eml_pad_delay, eml_trans_delay,
+ eml_trans_timeout);
+ }
+
ptr += sizeof(*ml_params);
skip_ml_params:
@@ -2351,7 +2391,7 @@ skip_ml_params:
for (i = 0; i < arg->peer_eht_mcs_count; i++) {
eht_mcs = ptr;
- eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
+ eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
sizeof(*eht_mcs));
eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
@@ -2359,6 +2399,10 @@ skip_ml_params:
ptr += sizeof(*eht_mcs);
}
+ /* Update MCS15 capability */
+ if (arg->eht_disable_mcs15)
+ cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);
+
tlv = ptr;
len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
/* fill ML Partner links */
@@ -2399,7 +2443,7 @@ skip_ml_params:
send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
- "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
+ "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
cmd->peer_listen_intval, cmd->peer_ht_caps,
@@ -2412,7 +2456,7 @@ send:
cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
- cmd->peer_eht_cap_phy[2]);
+ cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
if (ret) {
@@ -2591,7 +2635,10 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
cmd->scan_id = cpu_to_le32(arg->scan_id);
cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
- cmd->scan_priority = cpu_to_le32(arg->scan_priority);
+ if (ar->state_11d == ATH12K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
@@ -3313,6 +3360,110 @@ out:
return ret;
}
+int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
+ struct wmi_set_current_country_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_set_current_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_current_country_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "set current country pdev id %d alpha2 %c%c\n",
+ ar->pdev->pdev_id,
+ arg->alpha2[0],
+ arg->alpha2[1]);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
+ struct wmi_11d_scan_start_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_11d_scan_start_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
+ cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "send 11d scan start vdev id %d period %d ms internal %d ms\n",
+ arg->vdev_id, arg->scan_period_msec,
+ arg->start_interval_msec);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_11d_scan_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "send 11d scan stop vdev id %d\n",
+ cmd->vdev_id);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
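
Together with ath12k_wmi_send_set_current_country_cmd(), these two commands give the driver a complete 11d flow: push the user's alpha2, run a periodic 11d scan on a vdev, and stop it once a country is resolved. A hedged sketch of a caller; the caller itself and the timing constant are assumptions:

	/* Sketch only: plausible 11d start/stop sequence on one vdev.
	 * ATH12K_SCAN_11D_INTERVAL is a made-up constant for illustration.
	 */
	#define ATH12K_SCAN_11D_INTERVAL 600000 /* ms, hypothetical */

	static int sketch_start_11d(struct ath12k *ar, u32 vdev_id)
	{
		struct wmi_11d_scan_start_arg arg = {
			.vdev_id = vdev_id,
			.scan_period_msec = ATH12K_SCAN_11D_INTERVAL,
			.start_interval_msec = 0,
		};

		return ath12k_wmi_send_11d_scan_start_cmd(ar, &arg);
	}

	static void sketch_stop_11d(struct ath12k *ar, u32 vdev_id)
	{
		if (ath12k_wmi_send_11d_scan_stop_cmd(ar, vdev_id))
			ath12k_warn(ar->ab, "failed to stop 11d scan\n");
	}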
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
@@ -3646,15 +3797,15 @@ ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
arg[i].pdev_id = pdev->pdev_id;
switch (pdev->cap.supported_bands) {
- case WMI_HOST_WLAN_2G_5G_CAP:
+ case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
break;
- case WMI_HOST_WLAN_2G_CAP:
+ case WMI_HOST_WLAN_2GHZ_CAP:
arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
break;
- case WMI_HOST_WLAN_5G_CAP:
+ case WMI_HOST_WLAN_5GHZ_CAP:
arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
break;
@@ -3665,7 +3816,8 @@ ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
}
static void
-ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
+ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_params *wmi_cfg,
struct ath12k_wmi_resource_config_arg *tg_cfg)
{
wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
@@ -3732,6 +3884,9 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+ if (ab->hw_params->reoq_lut_support)
+ wmi_cfg->host_service_flags |=
+ cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
@@ -3772,7 +3927,7 @@ static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
ptr = skb->data + sizeof(*cmd);
cfg = ptr;
- ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
+ ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);
cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
sizeof(*cfg));
@@ -4601,6 +4756,7 @@ static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
return 0;
err:
+ kfree(svc_rdy_ext.mac_phy_caps);
ath12k_wmi_free_dbring_caps(ab);
return ret;
}
@@ -4699,7 +4855,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
bands = pdev->cap.supported_bands;
}
- if (bands & WMI_HOST_WLAN_2G_CAP) {
+ if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
caps->eht_cap_mac_info_2ghz,
caps->eht_cap_phy_info_2ghz,
@@ -4708,7 +4864,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
caps->eht_cap_info_internal);
}
- if (bands & WMI_HOST_WLAN_5G_CAP) {
+ if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
caps->eht_cap_mac_info_5ghz,
caps->eht_cap_phy_info_5ghz,
@@ -4922,7 +5078,7 @@ static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_param
for (count = 0; count < num_reg_rules; count++) {
start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
- if (start_freq >= ATH12K_MIN_6G_FREQ)
+ if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
num_invalid_5ghz_rules++;
}
@@ -4992,9 +5148,9 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
- if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
+ if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
- i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
+ i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
kfree(tb);
return -EINVAL;
}
@@ -5015,9 +5171,9 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
- if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
- num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
- num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
+ if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) {
ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
i);
kfree(tb);
@@ -5933,30 +6089,62 @@ static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
wake_up(&ab->wmi_ab.tx_credits_wq);
}
-static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
- struct sk_buff *skb)
+static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
{
- dev_kfree_skb(skb);
-}
+ const struct wmi_11d_new_cc_event *ev;
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ const void **tb;
+ int ret, i;
-static bool ath12k_reg_is_world_alpha(char *alpha)
-{
- if (alpha[0] == '0' && alpha[1] == '0')
- return true;
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ ath12k_warn(ab, "failed to fetch 11d new cc ev");
+ return -EPROTO;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
+ ab->new_alpha2[0],
+ ab->new_alpha2[1]);
+
+ kfree(tb);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ ar->state_11d = ATH12K_11D_IDLE;
+ ar->ah->regd_updated = false;
+ complete(&ar->completed_11d_scan);
+ }
+
+ queue_work(ab->workqueue, &ab->update_11d_work);
- if (alpha[0] == 'n' && alpha[1] == 'a')
- return true;
+ return 0;
+}
- return false;
+static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
}
static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
- struct ath12k_reg_info *reg_info = NULL;
- struct ieee80211_regdomain *regd = NULL;
- bool intersect = false;
- int ret = 0, pdev_idx, i, j;
- struct ath12k *ar;
+ struct ath12k_reg_info *reg_info;
+ u8 pdev_idx;
+ int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
if (!reg_info) {
@@ -5965,86 +6153,52 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
}
ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
-
if (ret) {
ath12k_warn(ab, "failed to extract regulatory info from received event\n");
- goto fallback;
+ goto mem_free;
}
- if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
- /* In case of failure to set the requested ctry,
- * fw retains the current regd. We print a failure info
- * and return from here.
+ ret = ath12k_reg_validate_reg_info(ab, reg_info);
+ if (ret == ATH12K_REG_STATUS_FALLBACK) {
+ ath12k_warn(ab, "failed to validate reg info %d\n", ret);
+ /* firmware has successfully switched to the new regd but the host
+ * cannot continue, so free reg_info and fall back to the old regd
*/
- ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ goto mem_free;
+ } else if (ret == ATH12K_REG_STATUS_DROP) {
+ /* reg info is valid but we will not store it and are
+ * not going to create a new regd for it
+ */
+ ret = ATH12K_REG_STATUS_VALID;
goto mem_free;
}
+ /* free old reg_info if it exists */
pdev_idx = reg_info->phy_id;
-
- if (pdev_idx >= ab->num_radios) {
- /* Process the event for phy0 only if single_pdev_only
- * is true. If pdev_idx is valid but not 0, discard the
- * event. Otherwise, it goes to fallback.
- */
- if (ab->hw_params->single_pdev_only &&
- pdev_idx < ab->hw_params->num_rxdma_per_pdev)
- goto mem_free;
- else
- goto fallback;
+ if (ab->reg_info[pdev_idx]) {
+ ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
+ kfree(ab->reg_info[pdev_idx]);
}
-
- /* Avoid multiple overwrites to default regd, during core
- * stop-start after mac registration.
+ /* reg_info is valid; store it for later use even if the
+ * regd build below fails
*/
- if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
- !memcmp(ab->default_regd[pdev_idx]->alpha2,
- reg_info->alpha2, 2))
- goto mem_free;
+ ab->reg_info[pdev_idx] = reg_info;
- /* Intersect new rules with default regd if a new country setting was
- * requested, i.e a default regd was already set during initialization
- * and the regd coming from this event has a valid country info.
- */
- if (ab->default_regd[pdev_idx] &&
- !ath12k_reg_is_world_alpha((char *)
- ab->default_regd[pdev_idx]->alpha2) &&
- !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
- intersect = true;
-
- regd = ath12k_reg_build_regd(ab, reg_info, intersect);
- if (!regd) {
- ath12k_warn(ab, "failed to build regd from reg_info\n");
+ ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
+ IEEE80211_REG_UNSET_AP);
+ if (ret) {
+ ath12k_warn(ab, "failed to handle chan list %d\n", ret);
goto fallback;
}
- spin_lock(&ab->base_lock);
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
- /* Once mac is registered, ar is valid and all CC events from
- * fw is considered to be received due to user requests
- * currently.
- * Free previously built regd before assigning the newly
- * generated regd to ar. NULL pointer handling will be
- * taken care by kfree itself.
- */
- ar = ab->pdevs[pdev_idx].ar;
- kfree(ab->new_regd[pdev_idx]);
- ab->new_regd[pdev_idx] = regd;
- queue_work(ab->workqueue, &ar->regd_update_work);
- } else {
- /* Multiple events for the same *ar is not expected. But we
- * can still clear any previously stored default_regd if we
- * are receiving this event for the same radio by mistake.
- * NULL pointer handling will be taken care by kfree itself.
- */
- kfree(ab->default_regd[pdev_idx]);
- /* This regd would be applied during mac registration */
- ab->default_regd[pdev_idx] = regd;
- }
- ab->dfs_region = reg_info->dfs_region;
- spin_unlock(&ab->base_lock);
+ goto out;
+
+mem_free:
+ ath12k_reg_reset_reg_info(reg_info);
+ kfree(reg_info);
- goto mem_free;
+ if (ret == ATH12K_REG_STATUS_VALID)
+ return ret;
fallback:
/* Fallback to older reg (by sending previous country setting
@@ -6056,20 +6210,8 @@ fallback:
*/
/* TODO: This is rare, but still should also be handled */
WARN_ON(1);
-mem_free:
- if (reg_info) {
- kfree(reg_info->reg_rules_2g_ptr);
- kfree(reg_info->reg_rules_5g_ptr);
- if (reg_info->is_ext_reg_event) {
- for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
- kfree(reg_info->reg_rules_6g_ap_ptr[i]);
-
- for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
- for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
- kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
- }
- kfree(reg_info);
- }
+
+out:
return ret;
}
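
The rework above makes ab->reg_info[] the owner of the most recently parsed regulatory info per pdev: any stale entry is reset and freed before the new pointer is stored, so a later regd rebuild can reuse it. A minimal userspace sketch of that ownership handoff, with illustrative stand-in types (not the driver's structures):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_RADIOS 3

struct reg_info {
	char alpha2[3];
};

static struct reg_info *stored[NUM_RADIOS];

/* store a freshly parsed reg_info, releasing any stale entry first */
static void store_reg_info(unsigned int pdev_idx, struct reg_info *fresh)
{
	if (pdev_idx >= NUM_RADIOS) {
		free(fresh);
		return;
	}
	free(stored[pdev_idx]);	/* free(NULL) is a no-op, like kfree() */
	stored[pdev_idx] = fresh;
}

int main(void)
{
	struct reg_info *a = calloc(1, sizeof(*a));
	struct reg_info *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	strcpy(a->alpha2, "US");
	strcpy(b->alpha2, "DE");

	store_reg_info(0, a);	/* first event: stored as-is */
	store_reg_info(0, b);	/* second event: 'a' is freed, 'b' kept */
	printf("pdev0 country: %s\n", stored[0]->alpha2);

	free(stored[0]);
	return 0;
}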
@@ -6225,13 +6367,14 @@ static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff
ar->last_wmi_vdev_start_status = 0;
status = le32_to_cpu(vdev_start_resp.status);
-
if (WARN_ON_ONCE(status)) {
ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
status, ath12k_wmi_vdev_resp_print(status));
ar->last_wmi_vdev_start_status = status;
}
+ ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);
+
complete(&ar->vdev_setup_done);
rcu_read_unlock();
@@ -6317,13 +6460,13 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
- rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
+ rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
status->band = NL80211_BAND_6GHZ;
status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
- } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
@@ -7216,7 +7359,7 @@ void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
else
buf[len] = 0;
- ath12k_debugfs_fw_stats_reset(ar);
+ ath12k_fw_stats_reset(ar);
}
static void
@@ -7335,7 +7478,7 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
u16 len)
{
const struct wmi_stats_event *ev = parse->ev;
- struct ath12k_fw_stats stats = {0};
+ struct ath12k_fw_stats *stats = parse->stats;
struct ath12k *ar;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -7344,18 +7487,18 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
int i, ret = 0;
const void *data = ptr;
- INIT_LIST_HEAD(&stats.vdevs);
- INIT_LIST_HEAD(&stats.bcn);
- INIT_LIST_HEAD(&stats.pdevs);
-
if (!ev) {
ath12k_warn(ab, "failed to fetch update stats ev");
return -EPROTO;
}
+ if (!stats)
+ return -EINVAL;
+
rcu_read_lock();
- ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+ stats->pdev_id = le32_to_cpu(ev->pdev_id);
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
if (!ar) {
ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
le32_to_cpu(ev->pdev_id));
@@ -7398,8 +7541,8 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
if (!dst)
continue;
ath12k_wmi_pull_vdev_stats(src, dst);
- stats.stats_id = WMI_REQUEST_VDEV_STAT;
- list_add_tail(&dst->list, &stats.vdevs);
+ stats->stats_id = WMI_REQUEST_VDEV_STAT;
+ list_add_tail(&dst->list, &stats->vdevs);
}
for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
const struct ath12k_wmi_bcn_stats_params *src;
@@ -7417,8 +7560,8 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
if (!dst)
continue;
ath12k_wmi_pull_bcn_stats(src, dst);
- stats.stats_id = WMI_REQUEST_BCN_STAT;
- list_add_tail(&dst->list, &stats.bcn);
+ stats->stats_id = WMI_REQUEST_BCN_STAT;
+ list_add_tail(&dst->list, &stats->bcn);
}
for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
const struct ath12k_wmi_pdev_stats_params *src;
@@ -7430,7 +7573,7 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
goto exit;
}
- stats.stats_id = WMI_REQUEST_PDEV_STAT;
+ stats->stats_id = WMI_REQUEST_PDEV_STAT;
data += sizeof(*src);
len -= sizeof(*src);
@@ -7442,11 +7585,9 @@ static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
- list_add_tail(&dst->list, &stats.pdevs);
+ list_add_tail(&dst->list, &stats->pdevs);
}
- complete(&ar->fw_stats_complete);
- ath12k_debugfs_fw_stats_process(ar, &stats);
exit:
rcu_read_unlock();
return ret;
@@ -7472,16 +7613,74 @@ static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
return ret;
}
+static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
+ struct ath12k_fw_stats *stats)
+{
+ struct wmi_tlv_fw_stats_parse parse = {};
+
+ stats->stats_id = 0;
+ parse.stats = stats;
+
+ return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_tlv_fw_stats_parse,
+ &parse);
+}
+
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
+ struct ath12k_fw_stats stats = {};
+ struct ath12k *ar;
int ret;
- struct wmi_tlv_fw_stats_parse parse = {};
- ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
- ath12k_wmi_tlv_fw_stats_parse,
- &parse);
- if (ret)
- ath12k_warn(ab, "failed to parse fw stats %d\n", ret);
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to get ar for pdev_id %d\n",
+ stats.pdev_id);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+ * debugfs fw stats. Therefore, process it separately.
+ */
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+ ar->fw_stats.fw_stats_done = true;
+ goto complete;
+ }
+
+ /* WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT are currently requested only
+ * via debugfs fw stats. Hence, process these in the debugfs context.
+ */
+ ath12k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+ complete(&ar->fw_stats_complete);
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+ /* Since the stats' pdev, vdev and beacon lists are spliced and
+ * reinitialised at this point, there is no need to free the individual lists.
+ */
+ return;
+
+free:
+ ath12k_fw_stats_free(&stats);
}
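
The reworked ath12k_update_stats_event() above parses into a stack-allocated container and then, under ar->data_lock, splices the per-type lists onto the long-lived ar->fw_stats lists, so nothing needs to be freed on success. A minimal userspace sketch of that splice-and-reinitialise pattern; the list helpers mirror <linux/list.h> semantics and the names are illustrative:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* move everything on @from to the tail of @to and reinitialise @from,
 * mirroring the kernel's list_splice_tail_init()
 */
static void list_splice_tail_init(struct list_head *from, struct list_head *to)
{
	if (from->next == from)
		return;
	from->next->prev = to->prev;
	to->prev->next = from->next;
	from->prev->next = to;
	to->prev = from->prev;
	INIT_LIST_HEAD(from);
}

struct pdev_stat {
	int tx_frames;
	struct list_head list;
};

int main(void)
{
	struct pdev_stat a = { .tx_frames = 10 }, b = { .tx_frames = 20 };
	struct list_head parsed, owner, *p;

	INIT_LIST_HEAD(&parsed);
	INIT_LIST_HEAD(&owner);
	list_add_tail(&a.list, &parsed);
	list_add_tail(&b.list, &parsed);

	/* the driver performs this step under ar->data_lock */
	list_splice_tail_init(&parsed, &owner);

	for (p = owner.next; p != &owner; p = p->next) {
		const struct pdev_stat *s = (const struct pdev_stat *)
			((const char *)p - offsetof(struct pdev_stat, list));
		printf("tx_frames=%d\n", s->tx_frames);
	}
	/* parsed is empty again, so there is nothing left to free */
	return 0;
}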
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -8640,6 +8839,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
ath12k_wmi_process_tpc_stats(ab, skb);
break;
+ case WMI_11D_NEW_COUNTRY_EVENTID:
+ ath12k_reg_11d_new_cc_event(ab, skb);
+ break;
/* add Unsupported events (rare) here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
@@ -9643,3 +9845,69 @@ int ath12k_wmi_mlo_teardown(struct ath12k *ar)
return 0;
}
+
+bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
+{
+ return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
+}
+
+int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
+ u32 vdev_id,
+ struct ath12k_reg_tpc_power_info *param)
+{
+ struct wmi_vdev_set_tpc_power_cmd *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_ch_power_params *ch;
+ int i, ret, len, array_len;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ u8 *ptr;
+
+ array_len = sizeof(*ch) * param->num_pwr_levels;
+ len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->psd_power = cpu_to_le32(param->is_psd_power);
+ cmd->eirp_power = cpu_to_le32(param->eirp_power);
+ cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
+ vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
+
+ ptr += sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);
+
+ ptr += TLV_HDR_SIZE;
+ ch = (struct wmi_vdev_ch_power_params *)ptr;
+
+ for (i = 0; i < param->num_pwr_levels; i++, ch++) {
+ ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
+ sizeof(*ch));
+ ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
+ ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
+ ch->chan_cfreq, ch->tx_power);
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
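
For reference, the command built by ath12k_wmi_send_vdev_set_tpc_power() is a fixed_param TLV followed by a single array TLV of per-channel entries, with the total skb length computed up front. A standalone sketch of that layout arithmetic, using simplified stand-in structs rather than the real firmware ABI:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TLV_HDR_SIZE sizeof(uint32_t)

struct tlv_cmd {                       /* stand-in for the fixed_param TLV */
	uint32_t tlv_header;
	uint32_t vdev_id;
	uint32_t psd_power;
} __attribute__((packed));

struct ch_power {                      /* stand-in for wmi_vdev_ch_power_params */
	uint32_t tlv_header;
	uint32_t chan_cfreq;
	uint32_t tx_power;
} __attribute__((packed));

int main(void)
{
	unsigned int num_pwr_levels = 2, i;
	uint32_t array_len = (uint32_t)(sizeof(struct ch_power) * num_pwr_levels);
	size_t len = sizeof(struct tlv_cmd) + TLV_HDR_SIZE + array_len;
	uint8_t *buf = calloc(1, len), *ptr = buf;
	struct tlv_cmd *cmd;
	struct ch_power *ch;

	if (!buf)
		return 1;

	cmd = (struct tlv_cmd *)ptr;   /* fixed-size command goes first */
	cmd->vdev_id = 0;
	cmd->psd_power = 1;
	ptr += sizeof(*cmd);

	/* the array TLV header carries the byte length of the entries */
	memcpy(ptr, &array_len, TLV_HDR_SIZE);
	ptr += TLV_HDR_SIZE;

	ch = (struct ch_power *)ptr;
	for (i = 0; i < num_pwr_levels; i++, ch++) {
		ch->chan_cfreq = 5955 + i * 20; /* one entry per 20 MHz chunk */
		ch->tx_power = 24;
	}

	printf("total command length: %zu bytes\n", len);
	free(buf);
	return 0;
}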
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 1ba33e30ddd2..ac18f75e0449 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -26,6 +26,7 @@ struct ath12k_base;
struct ath12k;
struct ath12k_link_vif;
struct ath12k_fw_stats;
+struct ath12k_reg_tpc_power_info;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -216,9 +217,9 @@ enum wmi_host_hw_mode_priority {
};
enum WMI_HOST_WLAN_BAND {
- WMI_HOST_WLAN_2G_CAP = 1,
- WMI_HOST_WLAN_5G_CAP = 2,
- WMI_HOST_WLAN_2G_5G_CAP = 3,
+ WMI_HOST_WLAN_2GHZ_CAP = 1,
+ WMI_HOST_WLAN_5GHZ_CAP = 2,
+ WMI_HOST_WLAN_2GHZ_5GHZ_CAP = 3,
};
enum wmi_cmd_group {
@@ -386,6 +387,22 @@ enum wmi_tlv_cmd_id {
WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_VDEV_SET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_TX_POWER_CMDID,
+ WMI_VDEV_LIMIT_OFFCHAN_CMDID,
+ WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
+ WMI_VDEV_CHAINMASK_CONFIG_CMDID,
+ WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
+ WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
+ WMI_VDEV_DELETE_ALL_PEER_CMDID,
+ WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
+ WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
+ WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
+ WMI_VDEV_SET_PCL_CMDID,
+ WMI_VDEV_GET_BIG_DATA_CMDID,
+ WMI_VDEV_GET_BIG_DATA_P2_CMDID,
+ WMI_VDEV_SET_TPC_POWER_CMDID,
WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
WMI_PEER_DELETE_CMDID,
WMI_PEER_FLUSH_TIDS_CMDID,
@@ -1955,6 +1972,8 @@ enum wmi_tlv_tag {
WMI_TAG_TPC_STATS_REG_PWR_ALLOWED,
WMI_TAG_TPC_STATS_RATES_ARRAY,
WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
+ WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
+ WMI_TAG_VDEV_CH_POWER_INFO,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
@@ -2201,6 +2220,8 @@ enum wmi_tlv_service {
WMI_MAX_EXT_SERVICE = 256,
+ WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
+
WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
WMI_TLV_SERVICE_11BE = 289,
@@ -2461,6 +2482,7 @@ struct wmi_init_cmd {
} __packed;
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+#define WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT 12
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
@@ -2690,8 +2712,8 @@ enum wmi_channel_width {
* 2 - index for 160 MHz, first 3 bytes valid
* 3 - index for 320 MHz, first 3 bytes valid
*/
-#define WMI_MAX_EHT_SUPP_MCS_2G_SIZE 2
-#define WMI_MAX_EHT_SUPP_MCS_5G_SIZE 4
+#define WMI_MAX_EHT_SUPP_MCS_2GHZ_SIZE 2
+#define WMI_MAX_EHT_SUPP_MCS_5GHZ_SIZE 4
#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_80 0
#define WMI_EHTCAP_TXRX_MCS_NSS_IDX_160 1
@@ -2730,8 +2752,8 @@ struct ath12k_wmi_caps_ext_params {
struct ath12k_wmi_ppe_threshold_params eht_ppet_2ghz;
struct ath12k_wmi_ppe_threshold_params eht_ppet_5ghz;
__le32 eht_cap_info_internal;
- __le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE];
- __le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE];
+ __le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2GHZ_SIZE];
+ __le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5GHZ_SIZE];
__le32 eml_capability;
__le32 mld_capability;
} __packed;
@@ -3754,6 +3776,7 @@ struct peer_assoc_mlo_params {
u32 ieee_link_id;
u8 num_partner_links;
struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+ u16 eml_cap;
};
struct wmi_rate_set_arg {
@@ -3832,6 +3855,7 @@ struct ath12k_wmi_peer_assoc_arg {
u32 punct_bitmap;
bool is_assoc;
struct peer_assoc_mlo_params ml;
+ bool eht_disable_mcs15;
};
#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
@@ -4026,6 +4050,28 @@ struct wmi_init_country_cmd {
} cc_info;
} __packed;
+struct wmi_11d_scan_start_arg {
+ u32 vdev_id;
+ u32 scan_period_msec;
+ u32 start_interval_msec;
+};
+
+struct wmi_11d_scan_start_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 scan_period_msec;
+ __le32 start_interval_msec;
+} __packed;
+
+struct wmi_11d_scan_stop_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_11d_new_cc_event {
+ __le32 new_alpha2;
+} __packed;
+
struct wmi_delba_send_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -4108,7 +4154,17 @@ struct ath12k_wmi_eht_rate_set_params {
#define MAX_REG_RULES 10
#define REG_ALPHA2_LEN 2
-#define MAX_6G_REG_RULES 5
+#define MAX_6GHZ_REG_RULES 5
+
+struct wmi_set_current_country_arg {
+ u8 alpha2[REG_ALPHA2_LEN];
+};
+
+struct wmi_set_current_country_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 new_alpha2;
+} __packed;
enum wmi_start_event_param {
WMI_VDEV_START_RESP_EVENT = 0,
@@ -4129,6 +4185,7 @@ struct wmi_vdev_start_resp_event {
};
__le32 cfgd_tx_streams;
__le32 cfgd_rx_streams;
+ __le32 max_allowed_tx_power;
} __packed;
/* VDEV start response status codes */
@@ -4474,6 +4531,7 @@ struct ath12k_wmi_target_cap_arg {
};
enum wmi_vdev_type {
+ WMI_VDEV_TYPE_UNSPEC = 0,
WMI_VDEV_TYPE_AP = 1,
WMI_VDEV_TYPE_STA = 2,
WMI_VDEV_TYPE_IBSS = 3,
@@ -5904,6 +5962,41 @@ struct wmi_tpc_stats_arg {
struct wmi_tpc_ctl_pwr_table_arg ctl_array;
};
+struct wmi_vdev_ch_power_params {
+ __le32 tlv_header;
+
+ /* Channel center frequency (MHz) */
+ __le32 chan_cfreq;
+
+ /* Unit: dBm, either PSD/EIRP power for this frequency or
+ * incremental for non-PSD BW
+ */
+ __le32 tx_power;
+} __packed;
+
+struct wmi_vdev_set_tpc_power_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+
+ /* Value: 0 or 1, is PSD power or not */
+ __le32 psd_power;
+
+ /* Maximum EIRP power (dBm units), valid only if power is PSD */
+ __le32 eirp_power;
+
+ /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
+ __le32 power_type_6ghz;
+
+ /* This fixed_param TLV is followed by num_pwr_levels entries of
+ * wmi_vdev_ch_power_info.
+ * For PSD power, each entry is the PSD/EIRP power of a 20 MHz chunk.
+ * For non-PSD power, the entries are the power values for 20 MHz,
+ * 40 MHz, and so on up to the BSS bandwidth.
+ * Software determines num_pwr_levels from the number of elements
+ * present in the variable-length array.
+ */
+} __packed;
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -5990,11 +6083,17 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
u32 vdev_id, u32 bcn_ctrl_op);
int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
struct ath12k_wmi_init_country_arg *arg);
+int
+ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
+ struct wmi_set_current_country_arg *arg);
int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
int vdev_id, const u8 *addr,
dma_addr_t paddr, u8 tid,
u8 ba_window_size_valid,
u32 ba_window_size);
+int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
+ struct wmi_11d_scan_start_arg *arg);
+int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id);
int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
struct ath12k_wmi_rx_reorder_queue_remove_arg *arg);
@@ -6092,5 +6191,9 @@ int ath12k_wmi_mlo_teardown(struct ath12k *ar);
void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
struct ath12k_fw_stats *fw_stats, u32 stats_id,
char *buf);
+bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar);
+int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
+ u32 vdev_id,
+ struct ath12k_reg_tpc_power_info *param);
#endif
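
The wmi_11d_scan_start_arg/_cmd pair added above follows this header's usual split between a host-endian argument struct and a packed little-endian wire struct that the send helper fills with cpu_to_le32(). A hedged sketch of that marshalling step, with the struct layouts simplified:

#include <stdint.h>
#include <stdio.h>

static uint32_t cpu_to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

struct scan_start_arg {                /* host endian, driver internal */
	uint32_t vdev_id;
	uint32_t scan_period_msec;
};

struct scan_start_cmd {                /* little endian, firmware ABI */
	uint32_t vdev_id;
	uint32_t scan_period_msec;
} __attribute__((packed));

int main(void)
{
	struct scan_start_arg arg = { .vdev_id = 1, .scan_period_msec = 60000 };
	struct scan_start_cmd cmd = {
		.vdev_id = cpu_to_le32(arg.vdev_id),
		.scan_period_msec = cpu_to_le32(arg.scan_period_msec),
	};

	/* on a little-endian host the wire values equal the host values */
	printf("wire: vdev %u, period %u ms\n", cmd.vdev_id, cmd.scan_period_msec);
	return 0;
}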
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index d4805e02b927..49b7ab26c477 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -74,7 +74,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
void __iomem *mem;
struct ath_softc *sc;
struct ieee80211_hw *hw;
- struct resource *res;
const struct platform_device_id *id = platform_get_device_id(pdev);
int irq;
int ret = 0;
@@ -86,16 +85,10 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource found\n");
- return -ENXIO;
- }
-
- mem = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (mem == NULL) {
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem)) {
dev_err(&pdev->dev, "ioremap failed\n");
- return -ENOMEM;
+ return PTR_ERR(mem);
}
irq = platform_get_irq(pdev, 0);
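
The ahb.c change above relies on devm_platform_ioremap_resource() reporting failure through the kernel's ERR_PTR encoding, which is why the check becomes IS_ERR()/PTR_ERR() instead of a NULL test. A userspace sketch of that encoding; MAX_ERRNO and the helpers mimic the kernel's, and the values are illustrative:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	/* the top MAX_ERRNO addresses are reserved for error codes */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *ioremap_resource(int fail)
{
	static char mmio[16];          /* stands in for the mapped region */

	return fail ? ERR_PTR(-6 /* -ENXIO */) : (void *)mmio;
}

int main(void)
{
	void *mem = ioremap_resource(1);

	if (IS_ERR(mem))
		printf("ioremap failed: %ld\n", PTR_ERR(mem));

	mem = ioremap_resource(0);
	if (!IS_ERR(mem))
		printf("mapped at %p\n", mem);
	return 0;
}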
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 547634f82183..81fa7cbad892 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -290,6 +290,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
struct ath_common *common = ath9k_hw_common(priv->ah);
int slot;
+ if (!priv->cur_beacon_conf.enable_beacon)
+ return;
+
if (swba->beacon_pending != 0) {
priv->beacon.bmisscnt++;
if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 0226c31a6cae..b7717f9e1e9b 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -366,8 +366,7 @@ static void carl9170_tx_shift_bm(struct ar9170 *ar,
if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
return;
- if (!bitmap_empty(tid_info->bitmap, off))
- off = find_first_bit(tid_info->bitmap, off);
+ off = min(off, find_first_bit(tid_info->bitmap, off));
tid_info->bsn += off;
tid_info->bsn &= 0x0fff;
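
The carl9170 change is behaviour-preserving because find_first_bit() returns its size argument when no bit below off is set, so min() keeps off unchanged exactly where the old bitmap_empty() test would have skipped the search. A small sketch, with find_first_bit() re-implemented naively for illustration:

#include <stdio.h>

/* naive stand-in: returns `size` when no bit below `size` is set,
 * matching the kernel helper's contract
 */
static unsigned int find_first_bit(const unsigned long *bitmap,
				   unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		if (bitmap[i / (8 * sizeof(long))] &
		    (1UL << (i % (8 * sizeof(long)))))
			return i;
	return size;
}

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long empty = 0, set = 1UL << 3;
	unsigned int off = 7;

	/* empty window: find_first_bit() == off, so min() keeps off == 7 */
	printf("%u\n", min_u(off, find_first_bit(&empty, off)));
	/* bit 3 set: the shift stops at the first outstanding frame (3) */
	printf("%u\n", min_u(off, find_first_bit(&set, off)));
	return 0;
}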
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c
index e5142c052985..d7a2a483cbc4 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.c
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.c
@@ -56,7 +56,7 @@ static int wcn36xx_tm_cmd_ptt(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg = buf;
wcn36xx_dbg(WCN36XX_DBG_TESTMODE,
- "testmode cmd wmi msg_id 0x%04X msg_len %d buf %pK buf_len %d\n",
+ "testmode cmd wmi msg_id 0x%04X msg_len %d buf %p buf_len %d\n",
msg->msg_id, msg->msg_body_length,
buf, buf_len);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 689f68d89a44..fff8b7f8abc5 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -7,6 +7,7 @@
#ifndef WIL6210_TXRX_H
#define WIL6210_TXRX_H
+#include <net/sock.h>
#include "wil6210.h"
#include "txrx_edma.h"
@@ -616,8 +617,7 @@ static inline bool wil_need_txstat(struct sk_buff *skb)
{
const u8 *da = wil_skb_get_da(skb);
- return is_unicast_ether_addr(da) && skb->sk &&
- (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
+ return is_unicast_ether_addr(da) && sk_requests_wifi_status(skb->sk);
}
static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 4b70845e1a26..dc2383faddd1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -74,7 +74,6 @@
#define VNDR_IE_HDR_SIZE 12
#define VNDR_IE_PARSE_LIMIT 5
-#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */
#define BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
@@ -1945,17 +1944,22 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_cfg80211_security *sec;
- s32 val = 0;
- s32 err = 0;
+ s32 val;
+ s32 err;
- if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
- else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
- val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
- else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3)
+ } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
+ if (drvr->bus_if->fwvid == BRCMF_FWVENDOR_CYW &&
+ sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_SAE)
+ val = WPA3_AUTH_SAE_PSK;
+ else
+ val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
+ } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3) {
val = WPA3_AUTH_SAE_PSK;
- else
+ } else {
val = WPA_AUTH_DISABLED;
+ }
brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", val);
if (err) {
@@ -2163,28 +2167,25 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_SAE:
val = WPA3_AUTH_SAE_PSK;
- if (sme->crypto.sae_pwd) {
- brcmf_dbg(INFO, "using SAE offload\n");
- profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
- }
break;
case WLAN_AKM_SUITE_FT_OVER_SAE:
val = WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT;
profile->is_ft = true;
- if (sme->crypto.sae_pwd) {
- brcmf_dbg(INFO, "using SAE offload\n");
- profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
- }
break;
default:
bphy_err(drvr, "invalid akm suite (%d)\n",
sme->crypto.akm_suites[0]);
return -EINVAL;
}
+ if (sme->crypto.sae_pwd)
+ profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
}
if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_1X)
brcmf_dbg(INFO, "using 1X offload\n");
+ if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE)
+ brcmf_dbg(INFO, "using SAE offload\n");
if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
goto skip_mfp_config;
@@ -2221,7 +2222,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "mfp", mfp);
skip_mfp_config:
- brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
+ brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
if (err) {
bphy_err(drvr, "could not set wpa_auth (%d)\n", err);
@@ -5509,7 +5510,7 @@ brcmf_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
}
-static int
+int
brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
@@ -5616,6 +5617,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
exit:
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_cfg80211_mgmt_tx);
static int brcmf_cfg80211_set_cqm_rssi_range_config(struct wiphy *wiphy,
struct net_device *ndev,
@@ -6009,6 +6011,7 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
vif->wdev.wiphy = cfg->wiphy;
vif->wdev.iftype = type;
+ init_completion(&vif->mgmt_tx);
brcmf_init_prof(&vif->profile);
@@ -6760,6 +6763,8 @@ static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
brcmf_fweh_register(cfg->pub, BRCMF_E_PSK_SUP,
brcmf_notify_connect_status);
brcmf_fweh_register(cfg->pub, BRCMF_E_RSSI, brcmf_notify_rssi);
+
+ brcmf_fwvid_register_event_handlers(cfg->pub);
}
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
@@ -7346,6 +7351,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
[NL80211_IFTYPE_P2P_CLIENT] = {
@@ -7653,6 +7659,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SAE_OFFLOAD_AP);
}
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE_EXT))
+ wiphy->features |= NL80211_FEATURE_SAE;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 2abae8894614..b83485ec7b87 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -13,6 +13,8 @@
#include "fwil_types.h"
#include "p2p.h"
+#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */
+
#define BRCMF_SCAN_IE_LEN_MAX 2048
#define WL_NUM_SCAN_MAX 10
@@ -142,6 +144,21 @@ enum brcmf_profile_fwauth {
};
/**
+ * enum brcmf_mgmt_tx_status - mgmt frame tx status
+ *
+ * @BRCMF_MGMT_TX_ACK: mgmt frame acked
+ * @BRCMF_MGMT_TX_NOACK: mgmt frame not acked
+ * @BRCMF_MGMT_TX_OFF_CHAN_COMPLETED: off-channel complete
+ * @BRCMF_MGMT_TX_SEND_FRAME: mgmt frame tx is in progress
+ */
+enum brcmf_mgmt_tx_status {
+ BRCMF_MGMT_TX_ACK,
+ BRCMF_MGMT_TX_NOACK,
+ BRCMF_MGMT_TX_OFF_CHAN_COMPLETED,
+ BRCMF_MGMT_TX_SEND_FRAME
+};
+
+/**
* struct brcmf_cfg80211_profile - profile information.
*
* @bssid: bssid of joined/joining ibss.
@@ -211,6 +228,9 @@ struct vif_saved_ie {
* @profile: profile information.
* @sme_state: SME state using enum brcmf_vif_status bits.
* @list: linked list.
+ * @mgmt_tx: completion for management frame transmit.
+ * @mgmt_tx_status: status of last management frame sent to firmware.
+ * @mgmt_tx_id: packet id of the management frame last sent to firmware.
* @mgmt_rx_reg: registered rx mgmt frame types.
* @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
* @cqm_rssi_low: Lower RSSI limit for CQM monitoring
@@ -224,6 +244,9 @@ struct brcmf_cfg80211_vif {
unsigned long sme_state;
struct vif_saved_ie saved_ie;
struct list_head list;
+ struct completion mgmt_tx;
+ unsigned long mgmt_tx_status;
+ u32 mgmt_tx_id;
u16 mgmt_rx_reg;
bool mbss;
int is_11d;
@@ -468,5 +491,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
+int brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie);
#endif /* BRCMFMAC_CFG80211_H */
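
The brcmf_mgmt_tx_status values declared above are bit numbers, not masks: vif->mgmt_tx_status is an unsigned long manipulated with set_bit()/clear_bit()/test_bit(). A userspace illustration of the same bookkeeping, with helper names local to this sketch:

#include <stdio.h>

enum { TX_ACK, TX_NOACK, TX_OFF_CHAN_COMPLETED, TX_SEND_FRAME };

static void set_bitn(unsigned long *w, int n) { *w |= 1UL << n; }
static void clear_bitn(unsigned long *w, int n) { *w &= ~(1UL << n); }
static int test_bitn(unsigned long w, int n) { return !!(w & (1UL << n)); }

int main(void)
{
	unsigned long status = 0;

	set_bitn(&status, TX_SEND_FRAME);  /* frame handed to firmware */
	set_bitn(&status, TX_ACK);         /* TX status event: acked */
	clear_bitn(&status, TX_SEND_FRAME);

	printf("acked=%d noack=%d\n",
	       test_bitn(status, TX_ACK), test_bitn(status, TX_NOACK));
	return 0;
}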
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index f26e4679e4ff..75f101622db1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -491,6 +491,7 @@ void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
trace_brcmf_dbg(level, func, &vaf);
va_end(args);
}
+BRCMF_EXPORT_SYMBOL_GPL(__brcmf_dbg);
#endif
static void brcmf_mp_attach(void)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 3d63010ae079..04f41c09deca 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1363,6 +1363,8 @@ int brcmf_attach(struct device *dev)
brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
brcmf_psm_watchdog_notify);
+ brcmf_fwvid_get_cfg80211_ops(drvr);
+
ret = brcmf_bus_started(drvr, drvr->ops);
if (ret != 0) {
bphy_err(drvr, "dongle is not responding: err=%d\n", ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
index 9a4837881486..c9537fb597ce 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
@@ -8,11 +8,21 @@
#include <bus.h>
#include <fwvid.h>
#include <fwil.h>
+#include <fweh.h>
#include "vops.h"
+#include "fwil_types.h"
+/* event definitions */
+#define BRCMF_CYW_E_EXT_AUTH_REQ 187
+#define BRCMF_CYW_E_EXT_AUTH_FRAME_RX 188
+#define BRCMF_CYW_E_MGMT_FRAME_TXS 189
+#define BRCMF_CYW_E_MGMT_FRAME_TXS_OC 190
#define BRCMF_CYW_E_LAST 197
+#define MGMT_AUTH_FRAME_DWELL_TIME 4000
+#define MGMT_AUTH_FRAME_WAIT_TIME (MGMT_AUTH_FRAME_DWELL_TIME + 100)
+
static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
struct cfg80211_crypto_settings *crypto)
{
@@ -39,6 +49,19 @@ static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
return err;
}
+static const struct brcmf_fweh_event_map brcmf_cyw_event_map = {
+ .items = {
+ { BRCMF_E_EXT_AUTH_REQ, BRCMF_CYW_E_EXT_AUTH_REQ },
+ { BRCMF_E_EXT_AUTH_FRAME_RX, BRCMF_CYW_E_EXT_AUTH_FRAME_RX },
+ { BRCMF_E_MGMT_FRAME_TXSTATUS, BRCMF_CYW_E_MGMT_FRAME_TXS },
+ {
+ BRCMF_E_MGMT_FRAME_OFFCHAN_DONE,
+ BRCMF_CYW_E_MGMT_FRAME_TXS_OC
+ },
+ },
+ .n_items = 4
+};
+
static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr)
{
struct brcmf_fweh_info *fweh;
@@ -49,11 +72,296 @@ static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr)
return -ENOMEM;
fweh->num_event_codes = BRCMF_CYW_E_LAST;
+ fweh->event_map = &brcmf_cyw_event_map;
drvr->fweh = fweh;
return 0;
}
+static int brcmf_cyw_activate_events(struct brcmf_if *ifp)
+{
+ struct brcmf_fweh_info *fweh = ifp->drvr->fweh;
+ struct brcmf_eventmsgs_ext *eventmask_msg;
+ u32 msglen;
+ int err;
+
+ msglen = sizeof(*eventmask_msg) + fweh->event_mask_len;
+ eventmask_msg = kzalloc(msglen, GFP_KERNEL);
+ if (!eventmask_msg)
+ return -ENOMEM;
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = CYW_EVENTMSGS_SET_MASK;
+ eventmask_msg->len = fweh->event_mask_len;
+ memcpy(eventmask_msg->mask, fweh->event_mask, fweh->event_mask_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs_ext", eventmask_msg,
+ msglen);
+ kfree(eventmask_msg);
+ return err;
+}
+
+static
+int brcmf_cyw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct ieee80211_channel *chan = params->chan;
+ struct brcmf_pub *drvr = cfg->pub;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ const struct ieee80211_mgmt *mgmt;
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ bool ack = false;
+ s32 chan_nr;
+ u32 freq;
+ struct brcmf_mf_params_le *mf_params;
+ u32 mf_params_len;
+ s32 ready;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ mgmt = (const struct ieee80211_mgmt *)buf;
+
+ if (!ieee80211_is_auth(mgmt->frame_control))
+ return brcmf_cfg80211_mgmt_tx(wiphy, wdev, params, cookie);
+
+ *cookie = 0;
+ vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
+ reinit_completion(&vif->mgmt_tx);
+ clear_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status);
+ clear_bit(BRCMF_MGMT_TX_NOACK, &vif->mgmt_tx_status);
+ clear_bit(BRCMF_MGMT_TX_OFF_CHAN_COMPLETED,
+ &vif->mgmt_tx_status);
+ mf_params_len = offsetof(struct brcmf_mf_params_le, data) +
+ (len - DOT11_MGMT_HDR_LEN);
+ mf_params = kzalloc(mf_params_len, GFP_KERNEL);
+ if (!mf_params)
+ return -ENOMEM;
+
+ mf_params->dwell_time = cpu_to_le32(MGMT_AUTH_FRAME_DWELL_TIME);
+ mf_params->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
+ mf_params->frame_control = mgmt->frame_control;
+
+ if (chan)
+ freq = chan->center_freq;
+ else
+ brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
+ &freq);
+ chan_nr = ieee80211_frequency_to_channel(freq);
+ mf_params->channel = cpu_to_le16(chan_nr);
+ memcpy(&mf_params->da[0], &mgmt->da[0], ETH_ALEN);
+ memcpy(&mf_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
+ mf_params->packet_id = cpu_to_le32(*cookie);
+ memcpy(mf_params->data, &buf[DOT11_MGMT_HDR_LEN],
+ le16_to_cpu(mf_params->len));
+
+ brcmf_dbg(TRACE, "Auth frame, cookie=%d, fc=%04x, len=%d, channel=%d\n",
+ le32_to_cpu(mf_params->packet_id),
+ le16_to_cpu(mf_params->frame_control),
+ le16_to_cpu(mf_params->len), chan_nr);
+
+ vif->mgmt_tx_id = le32_to_cpu(mf_params->packet_id);
+ set_bit(BRCMF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status);
+
+ err = brcmf_fil_bsscfg_data_set(vif->ifp, "mgmt_frame",
+ mf_params, mf_params_len);
+ if (err) {
+ bphy_err(drvr, "Failed to send Auth frame: err=%d\n",
+ err);
+ goto tx_status;
+ }
+
+ ready = wait_for_completion_timeout(&vif->mgmt_tx,
+ MGMT_AUTH_FRAME_WAIT_TIME);
+ if (test_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status)) {
+ brcmf_dbg(TRACE, "TX Auth frame operation succeeded\n");
+ ack = true;
+ } else {
+ bphy_err(drvr, "TX Auth frame operation %s: status=%ld\n",
+ ready ? "failed" : "timed out", vif->mgmt_tx_status);
+ }
+
+tx_status:
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
+ GFP_KERNEL);
+ kfree(mf_params);
+ return err;
+}
+
+static int
+brcmf_cyw_external_auth(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_external_auth_params *params)
+{
+ struct brcmf_if *ifp;
+ struct brcmf_pub *drvr;
+ struct brcmf_auth_req_status_le auth_status;
+ int ret = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ ifp = netdev_priv(dev);
+ drvr = ifp->drvr;
+ if (params->status == WLAN_STATUS_SUCCESS) {
+ auth_status.flags = cpu_to_le16(BRCMF_EXTAUTH_SUCCESS);
+ } else {
+ bphy_err(drvr, "External authentication failed: status=%d\n",
+ params->status);
+ auth_status.flags = cpu_to_le16(BRCMF_EXTAUTH_FAIL);
+ }
+
+ memcpy(auth_status.peer_mac, params->bssid, ETH_ALEN);
+ params->ssid.ssid_len = min_t(u8, params->ssid.ssid_len,
+ IEEE80211_MAX_SSID_LEN);
+ auth_status.ssid_len = cpu_to_le32(params->ssid.ssid_len);
+ memcpy(auth_status.ssid, params->ssid.ssid, params->ssid.ssid_len);
+
+ ret = brcmf_fil_iovar_data_set(ifp, "auth_status", &auth_status,
+ sizeof(auth_status));
+ if (ret < 0)
+ bphy_err(drvr, "auth_status iovar failed: ret=%d\n", ret);
+
+ return ret;
+}
+
+static void brcmf_cyw_get_cfg80211_ops(struct brcmf_pub *drvr)
+{
+ drvr->ops->mgmt_tx = brcmf_cyw_mgmt_tx;
+ drvr->ops->external_auth = brcmf_cyw_external_auth;
+}
+
+static s32
+brcmf_cyw_notify_ext_auth_req(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct cfg80211_external_auth_params params;
+ struct brcmf_auth_req_status_le *auth_req =
+ (struct brcmf_auth_req_status_le *)data;
+ s32 err = 0;
+
+ brcmf_dbg(INFO, "Enter: event %s (%d) received\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code);
+
+ if (e->datalen < sizeof(*auth_req)) {
+ bphy_err(drvr, "Event %s (%d) data too small. Ignore\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code);
+ return -EINVAL;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.action = NL80211_EXTERNAL_AUTH_START;
+ params.key_mgmt_suite = WLAN_AKM_SUITE_SAE;
+ params.status = WLAN_STATUS_SUCCESS;
+ params.ssid.ssid_len = min_t(u32, 32, le32_to_cpu(auth_req->ssid_len));
+ memcpy(params.ssid.ssid, auth_req->ssid, params.ssid.ssid_len);
+ memcpy(params.bssid, auth_req->peer_mac, ETH_ALEN);
+
+ err = cfg80211_external_auth_request(ifp->ndev, &params, GFP_KERNEL);
+ if (err)
+ bphy_err(drvr, "Ext Auth request to supplicant failed (%d)\n",
+ err);
+
+ return err;
+}
+
+static s32
+brcmf_notify_auth_frame_rx(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_cfg80211_info *cfg = drvr->config;
+ struct wireless_dev *wdev;
+ u32 mgmt_frame_len = e->datalen - sizeof(struct brcmf_rx_mgmt_data);
+ struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
+ u8 *frame = (u8 *)(rxframe + 1);
+ struct brcmu_chan ch;
+ struct ieee80211_mgmt *mgmt_frame;
+ s32 freq;
+
+ brcmf_dbg(INFO, "Enter: event %s (%d) received\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code);
+
+ if (e->datalen < sizeof(*rxframe)) {
+ bphy_err(drvr, "Event %s (%d) data too small. Ignore\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code);
+ return -EINVAL;
+ }
+
+ wdev = &ifp->vif->wdev;
+ WARN_ON(!wdev);
+
+ ch.chspec = be16_to_cpu(rxframe->chanspec);
+ cfg->d11inf.decchspec(&ch);
+
+ mgmt_frame = kzalloc(mgmt_frame_len, GFP_KERNEL);
+ if (!mgmt_frame)
+ return -ENOMEM;
+
+ mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_AUTH);
+ memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN);
+ memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
+ brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSSID, mgmt_frame->bssid,
+ ETH_ALEN);
+ frame += offsetof(struct ieee80211_mgmt, u);
+ memcpy(&mgmt_frame->u, frame,
+ mgmt_frame_len - offsetof(struct ieee80211_mgmt, u));
+
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num,
+ ch.band == BRCMU_CHAN_BAND_2G ?
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ);
+
+ cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
+ NL80211_RXMGMT_FLAG_EXTERNAL_AUTH);
+ kfree(mgmt_frame);
+ return 0;
+}
+
+static s32
+brcmf_notify_mgmt_tx_status(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e, void *data)
+{
+ struct brcmf_cfg80211_vif *vif = ifp->vif;
+ u32 *packet_id = (u32 *)data;
+
+ brcmf_dbg(INFO, "Enter: event %s (%d), status=%d\n",
+ brcmf_fweh_event_name(e->event_code), e->event_code,
+ e->status);
+
+ if (!test_bit(BRCMF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status) ||
+ (*packet_id != vif->mgmt_tx_id))
+ return 0;
+
+ if (e->event_code == BRCMF_E_MGMT_FRAME_TXSTATUS) {
+ if (e->status == BRCMF_E_STATUS_SUCCESS)
+ set_bit(BRCMF_MGMT_TX_ACK, &vif->mgmt_tx_status);
+ else
+ set_bit(BRCMF_MGMT_TX_NOACK, &vif->mgmt_tx_status);
+ } else {
+ set_bit(BRCMF_MGMT_TX_OFF_CHAN_COMPLETED, &vif->mgmt_tx_status);
+ }
+
+ complete(&vif->mgmt_tx);
+ return 0;
+}
+
+static void brcmf_cyw_register_event_handlers(struct brcmf_pub *drvr)
+{
+ brcmf_fweh_register(drvr, BRCMF_E_EXT_AUTH_REQ,
+ brcmf_cyw_notify_ext_auth_req);
+ brcmf_fweh_register(drvr, BRCMF_E_EXT_AUTH_FRAME_RX,
+ brcmf_notify_auth_frame_rx);
+ brcmf_fweh_register(drvr, BRCMF_E_MGMT_FRAME_TXSTATUS,
+ brcmf_notify_mgmt_tx_status);
+ brcmf_fweh_register(drvr, BRCMF_E_MGMT_FRAME_OFFCHAN_DONE,
+ brcmf_notify_mgmt_tx_status);
+}
+
const struct brcmf_fwvid_ops brcmf_cyw_ops = {
.set_sae_password = brcmf_cyw_set_sae_pwd,
.alloc_fweh_info = brcmf_cyw_alloc_fweh_info,
+ .activate_events = brcmf_cyw_activate_events,
+ .get_cfg80211_ops = brcmf_cyw_get_cfg80211_ops,
+ .register_event_handlers = brcmf_cyw_register_event_handlers,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h
new file mode 100644
index 000000000000..08c69142495a
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ */
+
+#ifndef CYW_FWIL_TYPES_H_
+#define CYW_FWIL_TYPES_H_
+
+#include <fwil_types.h>
+
+enum brcmf_event_msgs_ext_command {
+ CYW_EVENTMSGS_NONE = 0,
+ CYW_EVENTMSGS_SET_BIT = 1,
+ CYW_EVENTMSGS_RESET_BIT = 2,
+ CYW_EVENTMSGS_SET_MASK = 3,
+};
+
+#define EVENTMSGS_VER 1
+#define EVENTMSGS_EXT_STRUCT_SIZE offsetof(struct brcmf_eventmsgs_ext, mask[0])
+
+/**
+ * struct brcmf_eventmsgs_ext - structure used with "event_msgs_ext" iovar.
+ *
+ * @ver: version.
+ * @command: requested operation (see &enum brcmf_event_msgs_ext_command).
+ * @len: length of the @mask array.
+ * @maxgetsize: indicates maximum mask size that may be returned by firmware
+ * upon iovar GET.
+ * @mask: array where each bit represents firmware event.
+ */
+struct brcmf_eventmsgs_ext {
+ u8 ver;
+ u8 command;
+ u8 len;
+ u8 maxgetsize;
+ u8 mask[] __counted_by(len);
+};
+
+#define BRCMF_EXTAUTH_START 1
+#define BRCMF_EXTAUTH_ABORT 2
+#define BRCMF_EXTAUTH_FAIL 3
+#define BRCMF_EXTAUTH_SUCCESS 4
+
+/**
+ * struct brcmf_auth_req_status_le - external auth request and status update
+ *
+ * @flags: flags for external auth status
+ * @peer_mac: peer MAC address
+ * @ssid_len: length of ssid
+ * @ssid: ssid characters
+ * @pmkid: PMKSA identifier
+ */
+struct brcmf_auth_req_status_le {
+ __le16 flags;
+ u8 peer_mac[ETH_ALEN];
+ __le32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 pmkid[WLAN_PMKID_LEN];
+};
+
+/**
+ * struct brcmf_mf_params_le - management frame parameters for mgmt_frame iovar
+ *
+ * @version: version of the iovar
+ * @dwell_time: dwell duration in ms
+ * @len: length of frame data
+ * @frame_control: frame control
+ * @channel: channel
+ * @da: peer MAC address
+ * @bssid: BSS network identifier
+ * @packet_id: packet identifier
+ * @data: frame data
+ */
+struct brcmf_mf_params_le {
+ __le32 version;
+ __le32 dwell_time;
+ __le16 len;
+ __le16 frame_control;
+ __le16 channel;
+ u8 da[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le32 packet_id;
+ u8 data[] __counted_by(len);
+};
+
+#endif /* CYW_FWIL_TYPES_H_ */
+
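
brcmf_mf_params_le ends in a __counted_by flexible array, which is why brcmf_cyw_mgmt_tx() sizes the "mgmt_frame" iovar as offsetof(..., data) plus the frame body length. A minimal sketch of that sizing, with a simplified stand-in struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mf_params {                     /* simplified stand-in */
	uint32_t dwell_time;
	uint16_t len;
	uint8_t data[];                /* frame body, `len` bytes */
};

int main(void)
{
	const uint8_t body[] = { 0x00, 0x01, 0x02, 0x03 };
	size_t total = offsetof(struct mf_params, data) + sizeof(body);
	struct mf_params *mf = calloc(1, total);

	if (!mf)
		return 1;
	mf->dwell_time = 4000;         /* ms, as in the driver */
	mf->len = (uint16_t)sizeof(body);
	memcpy(mf->data, body, sizeof(body));
	printf("iovar payload: %zu bytes\n", total);
	free(mf);
	return 0;
}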
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 0d9ae197fa1e..488364ef8ff2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -42,8 +42,9 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MONITOR_FLAG, "rtap" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
{ BRCMF_FEAT_DOT11H, "802.11h" },
- { BRCMF_FEAT_SAE, "sae" },
+ { BRCMF_FEAT_SAE, "sae " },
{ BRCMF_FEAT_FWAUTH, "idauth" },
+ { BRCMF_FEAT_SAE_EXT, "sae_ext" },
};
#ifdef DEBUG
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 7f4f0b3e4a7b..31f8695ca417 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -31,6 +31,7 @@
* FWAUTH: Firmware authenticator
* DUMP_OBSS: Firmware has capable to dump obss info to support ACS
* SCAN_V2: Version 2 scan params
+ * SAE_EXT: SAE authentication handled by user-space supplicant
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -57,7 +58,8 @@
BRCMF_FEAT_DEF(DUMP_OBSS) \
BRCMF_FEAT_DEF(SCAN_V2) \
BRCMF_FEAT_DEF(PMKID_V2) \
- BRCMF_FEAT_DEF(PMKID_V3)
+ BRCMF_FEAT_DEF(PMKID_V3) \
+ BRCMF_FEAT_DEF(SAE_EXT)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index f0b6a7607f16..c2d98ee6652f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -75,6 +75,7 @@ const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
return "nodebug";
}
#endif
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fweh_event_name);
/**
* brcmf_fweh_queue_event() - create and queue event.
@@ -405,6 +406,7 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
brcmf_fweh_event_name(code));
return 0;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fweh_register);
/**
* brcmf_fweh_unregister() - remove handler for given code.
@@ -448,11 +450,14 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
brcmf_dbg(EVENT, "enable event IF\n");
setbit(fweh->event_mask, BRCMF_E_IF);
+ /* allow per-vendor method to activate firmware events */
+ if (!brcmf_fwvid_activate_events(ifp))
+ return 0;
+
err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
fweh->event_mask_len);
if (err)
bphy_err(fweh->drvr, "Set event_msgs error (%d)\n", err);
-
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index eed439b84010..e327dd58d29c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -94,7 +94,11 @@ struct brcmf_cfg80211_info;
BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
- BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+ BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+ BRCMF_ABSTRACT_ENUM_DEF(EXT_AUTH_REQ, 0) \
+ BRCMF_ABSTRACT_ENUM_DEF(EXT_AUTH_FRAME_RX, 1) \
+ BRCMF_ABSTRACT_ENUM_DEF(MGMT_FRAME_TXSTATUS, 2) \
+ BRCMF_ABSTRACT_ENUM_DEF(MGMT_FRAME_OFFCHAN_DONE, 3)
#define BRCMF_ENUM_DEF(id, val) \
BRCMF_E_##id = (val),
@@ -337,7 +341,7 @@ struct brcmf_fweh_info {
struct list_head event_q;
uint event_mask_len;
u8 *event_mask;
- struct brcmf_fweh_event_map *event_map;
+ const struct brcmf_fweh_event_map *event_map;
uint num_event_codes;
brcmf_fweh_handler_t evt_handler[] __counted_by(num_event_codes);
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
index e6ac9fc341bc..f3e011d090f2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
@@ -15,6 +15,9 @@ struct brcmf_fwvid_ops {
void (*feat_attach)(struct brcmf_if *ifp);
int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto);
int (*alloc_fweh_info)(struct brcmf_pub *drvr);
+ int (*activate_events)(struct brcmf_if *ifp);
+ void (*get_cfg80211_ops)(struct brcmf_pub *drvr);
+ void (*register_event_handlers)(struct brcmf_pub *drvr);
};
/* exported functions */
@@ -56,4 +59,30 @@ static inline int brcmf_fwvid_alloc_fweh_info(struct brcmf_pub *drvr)
return drvr->vops->alloc_fweh_info(drvr);
}
+static inline int brcmf_fwvid_activate_events(struct brcmf_if *ifp)
+{
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
+
+ if (!vops || !vops->activate_events)
+ return -EOPNOTSUPP;
+
+ return vops->activate_events(ifp);
+}
+
+static inline void brcmf_fwvid_get_cfg80211_ops(struct brcmf_pub *drvr)
+{
+ if (!drvr->vops || !drvr->vops->get_cfg80211_ops)
+ return;
+
+ drvr->vops->get_cfg80211_ops(drvr);
+}
+
+static inline void brcmf_fwvid_register_event_handlers(struct brcmf_pub *drvr)
+{
+ if (!drvr->vops || !drvr->vops->register_event_handlers)
+ return;
+
+ drvr->vops->register_event_handlers(drvr);
+}
+
#endif /* FWVID_H_ */
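
The new fwvid hooks follow the existing pattern in this header: optional callbacks dispatched through NULL-checked wrappers, with -EOPNOTSUPP telling the caller to fall back to the generic path (as brcmf_fweh_activate_events() does above). A compact sketch of that dispatch shape, with illustrative names:

#include <errno.h>
#include <stdio.h>

struct vendor_ops {
	int (*activate_events)(void *ctx);     /* optional */
};

static int dispatch_activate_events(const struct vendor_ops *vops, void *ctx)
{
	if (!vops || !vops->activate_events)
		return -EOPNOTSUPP;    /* caller falls back to generic path */
	return vops->activate_events(ctx);
}

static int cyw_activate_events(void *ctx)
{
	(void)ctx;
	return 0;                      /* vendor-specific path succeeded */
}

int main(void)
{
	const struct vendor_ops cyw = { .activate_events = cyw_activate_events };

	/* vendor hook present: the generic "event_msgs" set is skipped */
	printf("cyw: %d\n", dispatch_activate_events(&cyw, NULL));
	/* no hook: non-zero return tells the caller to use the generic iovar */
	printf("generic: %d\n", dispatch_activate_events(NULL, NULL));
	return 0;
}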
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index d06c724f63d9..b056336d5da6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -741,15 +741,19 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
void *buffer, int buflen)
{
int ret;
- char *tmpbuf;
+ char *tmpbuf = NULL;
u16 size;
- if ((!devinfo) || (devinfo->ctl_urb == NULL))
- return -EINVAL;
+ if (!devinfo || !devinfo->ctl_urb) {
+ ret = -EINVAL;
+ goto err;
+ }
tmpbuf = kmalloc(buflen, GFP_ATOMIC);
- if (!tmpbuf)
- return -ENOMEM;
+ if (!tmpbuf) {
+ ret = -ENOMEM;
+ goto err;
+ }
size = buflen;
devinfo->ctl_urb->transfer_buffer_length = size;
@@ -770,18 +774,23 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
if (ret < 0) {
brcmf_err("usb_submit_urb failed %d\n", ret);
- goto finalize;
+ goto err;
}
if (!brcmf_usb_ioctl_resp_wait(devinfo)) {
usb_kill_urb(devinfo->ctl_urb);
ret = -ETIMEDOUT;
+ goto err;
} else {
memcpy(buffer, tmpbuf, buflen);
}
-finalize:
kfree(tmpbuf);
+ return 0;
+
+err:
+ kfree(tmpbuf);
+ brcmf_err("dl cmd %u failed: err=%d\n", cmd, ret);
return ret;
}
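
The brcmf_usb_dl_cmd() rework funnels every failure through a single err: label; initialising tmpbuf to NULL makes that safe because kfree(NULL) is a no-op. A userspace sketch of the same cleanup idiom:

#include <stdio.h>
#include <stdlib.h>

static int do_cmd(int fail_early)
{
	char *tmpbuf = NULL;           /* NULL so the error path may free it */
	int ret;

	if (fail_early) {
		ret = -1;
		goto err;
	}

	tmpbuf = malloc(64);
	if (!tmpbuf) {
		ret = -2;
		goto err;
	}

	/* ... use tmpbuf ... */
	free(tmpbuf);
	return 0;

err:
	free(tmpbuf);                  /* free(NULL) is a no-op, like kfree() */
	fprintf(stderr, "cmd failed: err=%d\n", ret);
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_cmd(1), do_cmd(0));
	return 0;
}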
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
index 50d817485cf9..0cb64fc56783 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
@@ -198,7 +198,7 @@
#define CST4313_SPROM_OTP_SEL_SHIFT 0
/* 4313 Chip specific ChipControl register bits */
- /* 12 mA drive strengh for later 4313 */
+ /* 12 mA drive strength for later 4313 */
#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
/* Manufacturer Ids */
@@ -453,7 +453,7 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
/* get chipcommon chipstatus */
sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
- /* get chipcommon capabilites */
+ /* get chipcommon capabilities */
sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
/* get pmu rev and caps */
@@ -657,7 +657,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
}
/*
- * clock control policy function throught chipcommon
+ * clock control policy function through chipcommon
*
* set dynamic clk control mode (forceslow, forcefast, dynamic)
* returns true if we are forcing fast clock
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h
index 90b6e3982d2c..e791dd07ca78 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.h
@@ -76,7 +76,7 @@
* conventions for the use the flash space:
*/
-/* Minumum amount of flash we support */
+/* Minimum amount of flash we support */
#define FLASH_MIN 0x00020000 /* Minimum flash size */
#define CC_SROM_OTP 0x800 /* SROM/OTP address space */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index a767cbb79185..e1d707a7c964 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -479,7 +479,7 @@ void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
/*
* Preps the given packet for AMPDU based on the session data. If the
- * frame cannot be accomodated in the current session, -ENOSPC is
+ * frame cannot be accommodated in the current session, -ENOSPC is
* returned.
*/
int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
@@ -529,7 +529,7 @@ int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
}
/*
- * Now that we're sure this frame can be accomodated, update the
+ * Now that we're sure this frame can be accommodated, update the
* session information.
*/
session->ampdu_len += len;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index d1b9a18d0374..3878c4124e25 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -445,7 +445,7 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
/*
* OFDM 40 MHz SISO has the same power as the corresponding
- * MCS0-7 rate unless overriden by the locale specific code.
+ * MCS0-7 rate unless overridden by the locale specific code.
* We set this value to 0 as a flag (presumably 0 dBm isn't
* a possibility) and then copy the MCS0-7 value to the 40 MHz
* value if it wasn't explicitly set.
@@ -479,7 +479,7 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
/*
* 20 MHz has the same power as the corresponding OFDM rate
- * unless overriden by the locale specific code.
+ * unless overridden by the locale specific code.
*/
txpwr->mcs_20_siso[i] = txpwr->ofdm[i];
txpwr->mcs_40_siso[i] = 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
index 80c35027787a..c739bf7463b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
@@ -1348,7 +1348,7 @@ static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
ret = brcms_c_ampdu_add_frame(session, p);
if (ret == -ENOSPC) {
/*
- * AMPDU cannot accomodate this frame. Close out the in-
+ * AMPDU cannot accommodate this frame. Close out the in-
* progress AMPDU session and start a new one.
*/
ampdu_finalize(di);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 5b1d35601bbd..1c3d29dca424 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -48,9 +48,9 @@
FIF_BCN_PRBRESP_PROMISC | \
FIF_PSPOLL)
-#define CHAN2GHZ(channel, freqency, chflags) { \
+#define CHAN2GHZ(channel, frequency, chflags) { \
.band = NL80211_BAND_2GHZ, \
- .center_freq = (freqency), \
+ .center_freq = (frequency), \
.hw_value = (channel), \
.flags = chflags, \
.max_antenna_gain = 0, \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 2738d4d6c60a..c1a9c1e442ee 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -921,7 +921,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
* The "fallback limit" is the number of tx attempts a given
* MPDU is sent at the "primary" rate. Tx attempts beyond that
* limit are sent at the "secondary" rate.
- * A 'short frame' does not exceed RTS treshold.
+ * A 'short frame' does not exceed RTS threshold.
*/
u16 sfbl, /* Short Frame Rate Fallback Limit */
lfbl, /* Long Frame Rate Fallback Limit */
@@ -6259,7 +6259,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
}
/*
- * Currently only support same setting for primay and
+ * Currently only support same setting for primary and
* fallback rates. Unify flags for each rate into a
* single value for the frame
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
index 9f76b880814e..b7ca0d9891c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
@@ -603,7 +603,7 @@ enum brcms_bss_type {
* cur_etheraddr: h/w address
* flags: BSSCFG flags; see below
*
- * current_bss: BSS parms in ASSOCIATED state
+ * current_bss: BSS parameters in ASSOCIATED state
*
*
* ID: 'unique' ID of this bsscfg, assigned at bsscfg allocation
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c
index 71b80381f3ad..c9a29e626daa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/pmu.c
@@ -31,7 +31,7 @@
#define EXT_ILP_HZ 32768
/*
- * Duration for ILP clock frequency measurment in milliseconds
+ * Duration for ILP clock frequency measurement in milliseconds
*
* remark: 1000 must be an integer multiple of this duration
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 9546ceeaf5e3..3f476e333726 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -8,12 +8,23 @@ iwlwifi-objs += iwl-nvm-utils.o
iwlwifi-objs += iwl-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-v2.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
-iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
+
+CFLAGS_pcie/drv.o += -Wno-override-init
+
+# combined MAC/RF configurations
+iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o
+iwlwifi-$(CONFIG_IWLDVM) += cfg/5000.o cfg/6000.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o
+# MAC configurations
+iwlwifi-$(CONFIG_IWLMVM) += cfg/9000.o cfg/22000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o
iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o
+# RF configurations
+iwlwifi-$(CONFIG_IWLMVM) += cfg/rf-jf.o cfg/rf-hr.o cfg/rf-gf.o
+iwlwifi-$(CONFIG_IWLMLD) += cfg/rf-fm.o cfg/rf-wh.o cfg/rf-pe.o
+
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index f172ffd2a841..c029e595e7c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -29,7 +29,7 @@
#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl1000_base_params = {
+static const struct iwl_family_base_params iwl1000_base = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
@@ -42,12 +42,6 @@ static const struct iwl_base_params iwl1000_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_ht_params iwl1000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(NL80211_BAND_2GHZ),
-};
-
static const struct iwl_eeprom_params iwl1000_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -60,54 +54,67 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
}
};
+const struct iwl_mac_cfg iwl1000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_1000,
+ .base = &iwl1000_base,
+};
+
#define IWL_DEVICE_1000 \
.fw_name_pre = IWL1000_FW_PRE, \
.ucode_api_max = IWL1000_UCODE_API_MAX, \
.ucode_api_min = IWL1000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_1000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_1000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
- .trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK
-const struct iwl_cfg iwl1000_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
+const struct iwl_rf_cfg iwl1000_bgn_cfg = {
IWL_DEVICE_1000,
- .ht_params = &iwl1000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
-const struct iwl_cfg iwl1000_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
+const char iwl1000_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 1000 BGN";
+
+const struct iwl_rf_cfg iwl1000_bg_cfg = {
IWL_DEVICE_1000,
};
+const char iwl1000_bg_name[] = "Intel(R) Centrino(R) Wireless-N 1000 BG";
+
#define IWL_DEVICE_100 \
.fw_name_pre = IWL100_FW_PRE, \
.ucode_api_max = IWL100_UCODE_API_MAX, \
.ucode_api_min = IWL100_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_100, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_1000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
- .trans.base_params = &iwl1000_base_params, \
.eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
-const struct iwl_cfg iwl100_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
+const struct iwl_rf_cfg iwl100_bgn_cfg = {
IWL_DEVICE_100,
- .ht_params = &iwl1000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
-const struct iwl_cfg iwl100_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 100 BG",
+const char iwl100_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 100 BGN";
+
+const struct iwl_rf_cfg iwl100_bg_cfg = {
IWL_DEVICE_100,
};
+const char iwl100_bg_name[] = "Intel(R) Centrino(R) Wireless-N 100 BG";
+
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
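
[Editor's note] The 1000-series hunks show the recurring pattern of this series: ht_params is inlined into each config, and the marketing name moves out of the struct into a separate const char array. A hedged kernel-style sketch of the resulting usage (report_device() is hypothetical; only the two extern declarations come from the hunks above):

	extern const struct iwl_rf_cfg iwl1000_bgn_cfg;
	extern const char iwl1000_bgn_name[];

	/*
	 * Illustrative only: with the name split out of the config struct,
	 * callers carry the string alongside the cfg instead of reading a
	 * cfg->name member.
	 */
	static void report_device(void)
	{
		pr_info("detected: %s\n", iwl1000_bgn_name);
	}
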
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index 6f3f26da0ad5..47554a5f406e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -40,7 +40,7 @@
#define IWL135_FW_PRE "iwlwifi-135"
#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl2000_base_params = {
+static const struct iwl_family_base_params iwl2000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -54,7 +54,7 @@ static const struct iwl_base_params iwl2000_base_params = {
};
-static const struct iwl_base_params iwl2030_base_params = {
+static const struct iwl_family_base_params iwl2030_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -67,12 +67,6 @@ static const struct iwl_base_params iwl2030_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_ht_params iwl2000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(NL80211_BAND_2GHZ),
-};
-
static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -86,97 +80,119 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
.enhanced_txpower = true,
};
+const struct iwl_mac_cfg iwl2000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_2000,
+ .base = &iwl2000_base,
+};
+
#define IWL_DEVICE_2000 \
.fw_name_pre = IWL2000_FW_PRE, \
.ucode_api_max = IWL2000_UCODE_API_MAX, \
.ucode_api_min = IWL2000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_2000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl2000_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
+const struct iwl_rf_cfg iwl2000_2bgn_cfg = {
IWL_DEVICE_2000,
- .ht_params = &iwl2000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
-const struct iwl_cfg iwl2000_2bgn_d_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
- IWL_DEVICE_2000,
- .ht_params = &iwl2000_ht_params,
+const char iwl2000_2bgn_name[] = "Intel(R) Centrino(R) Wireless-N 2200 BGN";
+const char iwl2000_2bgn_d_name[] = "Intel(R) Centrino(R) Wireless-N 2200D BGN";
+
+const struct iwl_mac_cfg iwl2030_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_2030,
+ .base = &iwl2030_base,
};
#define IWL_DEVICE_2030 \
.fw_name_pre = IWL2030_FW_PRE, \
.ucode_api_max = IWL2030_UCODE_API_MAX, \
.ucode_api_min = IWL2030_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_2030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl2030_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
+const struct iwl_rf_cfg iwl2030_2bgn_cfg = {
IWL_DEVICE_2030,
- .ht_params = &iwl2000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
+};
+
+const char iwl2030_2bgn_name[] = "Intel(R) Centrino(R) Wireless-N 2230 BGN";
+
+const struct iwl_mac_cfg iwl105_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_105,
+ .base = &iwl2000_base,
};
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
.ucode_api_min = IWL105_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_105, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .trans.base_params = &iwl2000_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
-const struct iwl_cfg iwl105_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
+const struct iwl_rf_cfg iwl105_bgn_cfg = {
IWL_DEVICE_105,
- .ht_params = &iwl2000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
-const struct iwl_cfg iwl105_bgn_d_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
- IWL_DEVICE_105,
- .ht_params = &iwl2000_ht_params,
+const char iwl105_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 105 BGN";
+const char iwl105_bgn_d_name[] = "Intel(R) Centrino(R) Wireless-N 105D BGN";
+
+const struct iwl_mac_cfg iwl135_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_135,
+ .base = &iwl2030_base,
};
#define IWL_DEVICE_135 \
.fw_name_pre = IWL135_FW_PRE, \
.ucode_api_max = IWL135_UCODE_API_MAX, \
.ucode_api_min = IWL135_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_135, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_2000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .trans.base_params = &iwl2030_base_params, \
.eeprom_params = &iwl20x0_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
-const struct iwl_cfg iwl135_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
+const struct iwl_rf_cfg iwl135_bgn_cfg = {
IWL_DEVICE_135,
- .ht_params = &iwl2000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
+ },
};
+const char iwl135_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 135 BGN";
+
MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
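
[Editor's note] Worth noting in the 2000-series hunks: several distinct MAC configs share one family base (iwl105_mac_cfg reuses iwl2000_base, iwl135_mac_cfg reuses iwl2030_base). A minimal sketch of that sharing with stand-in types and placeholder values:

	struct family_base { int num_of_queues; };
	struct mac_cfg2 { int device_family; const struct family_base *base; };

	/* One set of base parameters, referenced by two device families. */
	static const struct family_base shared_base = { .num_of_queues = 6 };

	static const struct mac_cfg2 family_a = { .device_family = 1, .base = &shared_base };
	static const struct mac_cfg2 family_b = { .device_family = 2, .base = &shared_base };
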
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 130b9a8aa7eb..52e0beebf9ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -15,14 +15,7 @@
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 77
-/* NVM versions */
-#define IWL_22000_NVM_VERSION 0x0a1d
-
/* Memory offsets and lengths */
-#define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_22000_DCCM2_OFFSET 0x880000
-#define IWL_22000_DCCM2_LEN 0x8000
#define IWL_22000_SMEM_OFFSET 0x400000
#define IWL_22000_SMEM_LEN 0xD0000
@@ -44,11 +37,12 @@
IWL_QU_C_HR_B_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_QU_C_JF_B_MODULE_FIRMWARE(api) \
+ IWL_QU_C_JF_B_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_CC_A_MODULE_FIRMWARE(api) \
IWL_CC_A_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl_22000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_22000_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
@@ -57,155 +51,78 @@ static const struct iwl_base_params iwl_22000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
-};
-
-const struct iwl_ht_params iwl_22000_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) |
- BIT(NL80211_BAND_6GHZ),
-};
-
-#define IWL_DEVICE_22000_COMMON \
- .ucode_api_min = IWL_22000_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_22000_DCCM_OFFSET, \
- .dccm_len = IWL_22000_DCCM_LEN, \
- .dccm2_offset = IWL_22000_DCCM2_OFFSET, \
- .dccm2_len = IWL_22000_DCCM2_LEN, \
- .smem_offset = IWL_22000_SMEM_OFFSET, \
- .smem_len = IWL_22000_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x380, \
- .ht_params = &iwl_22000_ht_params, \
- .nvm_ver = IWL_22000_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0x400000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_22500 \
- IWL_DEVICE_22000_COMMON, \
- .ucode_api_max = IWL_22000_UCODE_API_MAX, \
- .trans.device_family = IWL_DEVICE_FAMILY_22000, \
- .trans.base_params = &iwl_22000_base_params, \
- .gp2_reg_addr = 0xa02c68, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = MON_BUFF_WRPTR_VER2, \
- .mask = 0xffffffff, \
- }, \
- .cycle_cnt = { \
- .addr = MON_BUFF_CYCLE_CNT_VER2, \
- .mask = 0xffffffff, \
- }, \
- }
-
-const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
+ .smem_offset = IWL_22000_SMEM_OFFSET,
+ .smem_len = IWL_22000_SMEM_LEN,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x380,
+ .min_umac_error_event_table = 0x400000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .gp2_reg_addr = 0xa02c68,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = MON_BUFF_WRPTR_VER2,
+ .mask = 0xffffffff,
+ },
+ .cycle_cnt = {
+ .addr = MON_BUFF_CYCLE_CNT_VER2,
+ .mask = 0xffffffff,
+ },
+ },
+ .ucode_api_min = IWL_22000_UCODE_API_MIN,
+ .ucode_api_max = IWL_22000_UCODE_API_MAX,
+};
+
+const struct iwl_mac_cfg iwl_qu_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
- .base_params = &iwl_22000_base_params,
+ .base = &iwl_22000_base,
.integrated = true,
.xtal_latency = 500,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
-const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = {
+const struct iwl_mac_cfg iwl_qu_medium_latency_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
- .base_params = &iwl_22000_base_params,
+ .base = &iwl_22000_base,
.integrated = true,
.xtal_latency = 1820,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US,
};
-const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
+const struct iwl_mac_cfg iwl_qu_long_latency_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_22000,
- .base_params = &iwl_22000_base_params,
+ .base = &iwl_22000_base,
.integrated = true,
.xtal_latency = 12000,
.low_latency_xtal = true,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-/*
- * If the device doesn't support HE, no need to have that many buffers.
- * 22000 devices can split multiple frames into a single RB, so fewer are
- * needed; AX210 cannot (but use smaller RBs by default) - these sizes
- * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with
- * additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_NON_HE 512
-#define IWL_NUM_RBDS_22000_HE 2048
-
-/*
- * All JF radio modules are part of the 9000 series, but the MAC part
- * looks more like 22000. That's why this device is here, but called
- * 9560 nevertheless.
- */
-const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg = {
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg = {
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg = {
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
+const struct iwl_mac_cfg iwl_ax200_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_22000,
- .base_params = &iwl_22000_base_params,
+ .base = &iwl_22000_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.bisr_workaround = 1,
};
-const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
-const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
-const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
-const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
-
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
const char iwl_ax200_killer_1650x_name[] =
@@ -215,213 +132,10 @@ const char iwl_ax201_killer_1650s_name[] =
const char iwl_ax201_killer_1650i_name[] =
"Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)";
-const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .tx_with_siso_diversity = true,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_qu_b0_hr_b0 = {
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .tx_with_siso_diversity = true,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_qu_c0_hr_b0 = {
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_quz_a0_hr1_b0 = {
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .tx_with_siso_diversity = true,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_ax200_cfg_cc = {
- .fw_name_pre = IWL_CC_A_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
- .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
- .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = {
- .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QU_C_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
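
[Editor's note] Each of the per-device entries removed above repeated the same BlockAck workaround: capping max_tx_agg_size at the HT window so mac80211 would not default to the HE maximum. A worked illustration of that cap (the helper and its zero-means-default convention are illustrative; the constants match 802.11, where an HT BlockAck window covers 64 MPDUs and HE allows 256):

	#define EXAMPLE_AMPDU_BUF_HT  64   /* IEEE80211_MAX_AMPDU_BUF_HT */
	#define EXAMPLE_AMPDU_BUF_HE 256   /* what mac80211 would pick for HE */

	static int effective_tx_agg_size(int max_tx_agg_size)
	{
		/* 0 means "no override": fall back to the HE maximum */
		return max_tx_agg_size ? max_tx_agg_size : EXAMPLE_AMPDU_BUF_HE;
	}

With the rework, that restriction no longer needs to be restated once per device entry.
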
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index de7ede59a994..aaae3b1f5433 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -30,7 +30,7 @@
#define IWL5150_FW_PRE "iwlwifi-5150"
#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl5000_base_params = {
+static const struct iwl_family_base_params iwl5000_base = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -41,11 +41,6 @@ static const struct iwl_base_params iwl5000_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_ht_params iwl5000_ht_params = {
- .ht_greenfield_support = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
static const struct iwl_eeprom_params iwl5000_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -58,93 +53,107 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
},
};
+const struct iwl_mac_cfg iwl5000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_5000,
+ .base = &iwl5000_base,
+};
+
#define IWL_DEVICE_5000 \
.fw_name_pre = IWL5000_FW_PRE, \
.ucode_api_max = IWL5000_UCODE_API_MAX, \
.ucode_api_min = IWL5000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_5000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_5000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
- .trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK
-const struct iwl_cfg iwl5300_agn_cfg = {
- .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
+const struct iwl_rf_cfg iwl5300_agn_cfg = {
IWL_DEVICE_5000,
/* at least EEPROM 0x11A has wrong info */
.valid_tx_ant = ANT_ABC, /* .cfg overwrite */
.valid_rx_ant = ANT_ABC, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl5100_bgn_cfg = {
- .name = "Intel(R) WiFi Link 5100 BGN",
- IWL_DEVICE_5000,
- .valid_tx_ant = ANT_B, /* .cfg overwrite */
- .valid_rx_ant = ANT_AB, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
-};
+const char iwl5300_agn_name[] = "Intel(R) Ultimate N WiFi Link 5300 AGN";
-const struct iwl_cfg iwl5100_abg_cfg = {
- .name = "Intel(R) WiFi Link 5100 ABG",
+const struct iwl_rf_cfg iwl5100_n_cfg = {
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
.valid_rx_ant = ANT_AB, /* .cfg overwrite */
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl5100_agn_cfg = {
- .name = "Intel(R) WiFi Link 5100 AGN",
+const char iwl5100_bgn_name[] = "Intel(R) WiFi Link 5100 BGN";
+
+const struct iwl_rf_cfg iwl5100_abg_cfg = {
IWL_DEVICE_5000,
.valid_tx_ant = ANT_B, /* .cfg overwrite */
.valid_rx_ant = ANT_AB, /* .cfg overwrite */
- .ht_params = &iwl5000_ht_params,
};
-const struct iwl_cfg iwl5350_agn_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
+const char iwl5100_abg_name[] = "Intel(R) WiFi Link 5100 ABG";
+const char iwl5100_agn_name[] = "Intel(R) WiFi Link 5100 AGN";
+
+const struct iwl_rf_cfg iwl5350_agn_cfg = {
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
- .trans.device_family = IWL_DEVICE_FAMILY_5000,
.max_inst_size = IWLAGN_RTC_INST_SIZE,
.max_data_size = IWLAGN_RTC_DATA_SIZE,
.nvm_ver = EEPROM_5050_EEPROM_VERSION,
.nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .trans.base_params = &iwl5000_base_params,
.eeprom_params = &iwl5000_eeprom_params,
- .ht_params = &iwl5000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
};
+const char iwl5350_agn_name[] = "Intel(R) WiMAX/WiFi Link 5350 AGN";
+
+const struct iwl_mac_cfg iwl5150_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_5150,
+ .base = &iwl5000_base,
+};
+
#define IWL_DEVICE_5150 \
.fw_name_pre = IWL5150_FW_PRE, \
.ucode_api_max = IWL5150_UCODE_API_MAX, \
.ucode_api_min = IWL5150_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_5150, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_5050_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
- .trans.base_params = &iwl5000_base_params, \
.eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-const struct iwl_cfg iwl5150_agn_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
+const struct iwl_rf_cfg iwl5150_agn_cfg = {
IWL_DEVICE_5150,
- .ht_params = &iwl5000_ht_params,
-
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl5150_abg_cfg = {
- .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
+const char iwl5150_agn_name[] = "Intel(R) WiMAX/WiFi Link 5150 AGN";
+
+const struct iwl_rf_cfg iwl5150_abg_cfg = {
IWL_DEVICE_5150,
};
+const char iwl5150_abg_name[] = "Intel(R) WiMAX/WiFi Link 5150 ABG";
+
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index f013cf420569..ab13394896e0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2020, 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -49,7 +49,7 @@
#define IWL6030_FW_PRE "iwlwifi-6000g2b"
#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl6000_base_params = {
+static const struct iwl_family_base_params iwl6000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -62,7 +62,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_base_params iwl6050_base_params = {
+static const struct iwl_family_base_params iwl6050_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -75,7 +75,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_base_params iwl6000_g2_base_params = {
+static const struct iwl_family_base_params iwl6000_g2_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
@@ -88,12 +88,6 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.scd_chain_ext_wa = true,
};
-static const struct iwl_ht_params iwl6000_ht_params = {
- .ht_greenfield_support = true,
- .use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -107,141 +101,126 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.enhanced_txpower = true,
};
+const struct iwl_mac_cfg iwl6005_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6005,
+ .base = &iwl6000_g2_base,
+};
+
#define IWL_DEVICE_6005 \
.fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6005, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6005_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
- .trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl6005_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
-
-const struct iwl_cfg iwl6005_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
- IWL_DEVICE_6005,
-};
-
-const struct iwl_cfg iwl6005_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
+const struct iwl_rf_cfg iwl6005_n_cfg = {
IWL_DEVICE_6005,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6005_2agn_sff_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
+const char iwl6005_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6205 AGN";
+const char iwl6005_2agn_sff_name[] = "Intel(R) Centrino(R) Advanced-N 6205S AGN";
+const char iwl6005_2agn_d_name[] = "Intel(R) Centrino(R) Advanced-N 6205D AGN";
+const char iwl6005_2agn_mow1_name[] = "Intel(R) Centrino(R) Advanced-N 6206 AGN";
+const char iwl6005_2agn_mow2_name[] = "Intel(R) Centrino(R) Advanced-N 6207 AGN";
-const struct iwl_cfg iwl6005_2agn_d_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
+const struct iwl_rf_cfg iwl6005_non_n_cfg = {
IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
};
-const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
-};
+const char iwl6005_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6205 ABG";
+const char iwl6005_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6205 BG";
-const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
- IWL_DEVICE_6005,
- .ht_params = &iwl6000_ht_params,
+const struct iwl_mac_cfg iwl6030_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6030,
+ .base = &iwl6000_g2_base,
};
#define IWL_DEVICE_6030 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
- .trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl6030_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
+const struct iwl_rf_cfg iwl6030_n_cfg = {
IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6030_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
- IWL_DEVICE_6030,
-};
+const char iwl6030_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6230 AGN";
+const char iwl6030_2bgn_name[] = "Intel(R) Centrino(R) Advanced-N 6230 BGN";
+const char iwl1030_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 1030 BGN";
+const char iwl1030_bg_name[] = "Intel(R) Centrino(R) Wireless-N 1030 BG";
-const struct iwl_cfg iwl6030_2bgn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
+const struct iwl_rf_cfg iwl6030_non_n_cfg = {
IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
};
-const struct iwl_cfg iwl6030_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
- IWL_DEVICE_6030,
-};
+const char iwl6030_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6230 ABG";
+const char iwl6030_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6230 BG";
#define IWL_DEVICE_6035 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6035_UCODE_API_MAX, \
.ucode_api_min = IWL6035_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
- .trans.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE
-const struct iwl_cfg iwl6035_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
+const struct iwl_rf_cfg iwl6035_2agn_cfg = {
IWL_DEVICE_6035,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6035_2agn_sff_cfg = {
- .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
- IWL_DEVICE_6035,
- .ht_params = &iwl6000_ht_params,
-};
+const char iwl6035_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6235 AGN";
+const char iwl6035_2agn_sff_name[] = "Intel(R) Centrino(R) Ultimate-N 6235 AGN";
-const struct iwl_cfg iwl1030_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
+const struct iwl_rf_cfg iwl130_bgn_cfg = {
IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
+ .rx_with_siso_diversity = true,
};
-const struct iwl_cfg iwl1030_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
- IWL_DEVICE_6030,
-};
+const char iwl130_bgn_name[] = "Intel(R) Centrino(R) Wireless-N 130 BGN";
-const struct iwl_cfg iwl130_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
+const struct iwl_rf_cfg iwl130_bg_cfg = {
IWL_DEVICE_6030,
- .ht_params = &iwl6000_ht_params,
.rx_with_siso_diversity = true,
};
-const struct iwl_cfg iwl130_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
- IWL_DEVICE_6030,
- .rx_with_siso_diversity = true,
+const char iwl130_bg_name[] = "Intel(R) Centrino(R) Wireless-N 130 BG";
+
+const struct iwl_mac_cfg iwl6000i_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6000i,
+ .base = &iwl6000_base,
};
/*
@@ -251,101 +230,127 @@ const struct iwl_cfg iwl130_bg_cfg = {
.fw_name_pre = IWL6000_FW_PRE, \
.ucode_api_max = IWL6000_UCODE_API_MAX, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6000i, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
.nvm_ver = EEPROM_6000_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
- .trans.base_params = &iwl6000_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK
-const struct iwl_cfg iwl6000i_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
+const struct iwl_rf_cfg iwl6000i_2agn_cfg = {
IWL_DEVICE_6000i,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6000i_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
+const char iwl6000i_2agn_name[] = "Intel(R) Centrino(R) Advanced-N 6200 AGN";
+
+const struct iwl_rf_cfg iwl6000i_non_n_cfg = {
IWL_DEVICE_6000i,
};
-const struct iwl_cfg iwl6000i_2bg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
- IWL_DEVICE_6000i,
+const char iwl6000i_2abg_name[] = "Intel(R) Centrino(R) Advanced-N 6200 ABG";
+const char iwl6000i_2bg_name[] = "Intel(R) Centrino(R) Advanced-N 6200 BG";
+
+const struct iwl_mac_cfg iwl6050_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6050,
+ .base = &iwl6050_base,
};
#define IWL_DEVICE_6050 \
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6050, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
.nvm_ver = EEPROM_6050_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
- .trans.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-const struct iwl_cfg iwl6050_2agn_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
+const struct iwl_rf_cfg iwl6050_2agn_cfg = {
IWL_DEVICE_6050,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6050_2abg_cfg = {
- .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
+const char iwl6050_2agn_name[] = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN";
+
+const struct iwl_rf_cfg iwl6050_2abg_cfg = {
IWL_DEVICE_6050,
};
+const char iwl6050_2abg_name[] = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG";
+
+const struct iwl_mac_cfg iwl6150_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6150,
+ .base = &iwl6050_base,
+};
+
#define IWL_DEVICE_6150 \
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_6150, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6150_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
- .trans.base_params = &iwl6050_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
-const struct iwl_cfg iwl6150_bgn_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
+const struct iwl_rf_cfg iwl6150_bgn_cfg = {
IWL_DEVICE_6150,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
};
-const struct iwl_cfg iwl6150_bg_cfg = {
- .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+const char iwl6150_bgn_name[] = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN";
+
+const struct iwl_rf_cfg iwl6150_bg_cfg = {
IWL_DEVICE_6150,
};
-const struct iwl_cfg iwl6000_3agn_cfg = {
- .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
+const char iwl6150_bg_name[] = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG";
+
+const struct iwl_mac_cfg iwl6000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_6000,
+ .base = &iwl6000_base,
+};
+
+const struct iwl_rf_cfg iwl6000_3agn_cfg = {
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
- .trans.device_family = IWL_DEVICE_FAMILY_6000,
.max_inst_size = IWL60_RTC_INST_SIZE,
.max_data_size = IWL60_RTC_DATA_SIZE,
.nvm_ver = EEPROM_6000_EEPROM_VERSION,
.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
- .trans.base_params = &iwl6000_base_params,
.eeprom_params = &iwl6000_eeprom_params,
- .ht_params = &iwl6000_ht_params,
+ .ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.led_mode = IWL_LED_BLINK,
};
+const char iwl6000_3agn_name[] = "Intel(R) Centrino(R) Ultimate-N 6300 AGN";
+
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 4e2afdedf4c6..f987ad3192c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020, 2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020, 2023, 2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -49,7 +49,7 @@
#define IWL7265D_FW_PRE "iwlwifi-7265D"
#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl7000_base_params = {
+static const struct iwl_family_base_params iwl7000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_16K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
@@ -60,6 +60,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
.apmg_wake_up_wa = true,
+ .nvm_hw_section_num = 0,
};
static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
@@ -84,16 +85,13 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.support_tx_backoff = true,
};
-static const struct iwl_ht_params iwl7000_ht_params = {
- .stbc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+const struct iwl_mac_cfg iwl7000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_7000,
+ .base = &iwl7000_base,
};
#define IWL_DEVICE_7000_COMMON \
- .trans.device_family = IWL_DEVICE_FAMILY_7000, \
- .trans.base_params = &iwl7000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 0, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL7000_DCCM_OFFSET
@@ -117,77 +115,52 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.ucode_api_max = IWL7265D_UCODE_API_MAX, \
.ucode_api_min = IWL7265D_UCODE_API_MIN
-const struct iwl_cfg iwl7260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 7260",
+const char iwl7260_2ac_name[] = "Intel(R) Dual Band Wireless AC 7260";
+const char iwl7260_2n_name[] = "Intel(R) Dual Band Wireless N 7260";
+const char iwl7260_n_name[] = "Intel(R) Wireless N 7260";
+const char iwl3160_2ac_name[] = "Intel(R) Dual Band Wireless AC 3160";
+const char iwl3160_2n_name[] = "Intel(R) Dual Band Wireless N 3160";
+const char iwl3160_n_name[] = "Intel(R) Wireless N 3160";
+const char iwl3165_2ac_name[] = "Intel(R) Dual Band Wireless-AC 3165";
+const char iwl3168_2ac_name[] = "Intel(R) Dual Band Wireless-AC 3168";
+const char iwl7265_2ac_name[] = "Intel(R) Dual Band Wireless-AC 7265";
+const char iwl7265_2n_name[] = "Intel(R) Dual Band Wireless-N 7265";
+const char iwl7265_n_name[] = "Intel(R) Wireless-N 7265";
+
+const struct iwl_rf_cfg iwl7260_cfg = {
.fw_name_pre = IWL7260_FW_PRE,
IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL7260_NVM_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
};
-const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
- .name = "Intel(R) Dual Band Wireless AC 7260",
+const struct iwl_rf_cfg iwl7260_high_temp_cfg = {
.fw_name_pre = IWL7260_FW_PRE,
IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL7260_NVM_VERSION,
- .high_temp = true,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
.thermal_params = &iwl7000_high_temp_tt_params,
};
-const struct iwl_cfg iwl7260_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 7260",
- .fw_name_pre = IWL7260_FW_PRE,
- IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL7260_NVM_VERSION,
- .host_interrupt_operation_mode = true,
- .lp_xtal_workaround = true,
- .dccm_len = IWL7260_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7260_n_cfg = {
- .name = "Intel(R) Wireless N 7260",
- .fw_name_pre = IWL7260_FW_PRE,
- IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL7260_NVM_VERSION,
- .host_interrupt_operation_mode = true,
- .lp_xtal_workaround = true,
- .dccm_len = IWL7260_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl3160_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 3160",
+const struct iwl_rf_cfg iwl3160_cfg = {
.fw_name_pre = IWL3160_FW_PRE,
IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL3160_NVM_VERSION,
- .host_interrupt_operation_mode = true,
- .dccm_len = IWL3160_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl3160_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 3160",
- .fw_name_pre = IWL3160_FW_PRE,
- IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
- .nvm_ver = IWL3160_NVM_VERSION,
- .host_interrupt_operation_mode = true,
- .dccm_len = IWL3160_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl3160_n_cfg = {
- .name = "Intel(R) Wireless N 3160",
- .fw_name_pre = IWL3160_FW_PRE,
- IWL_DEVICE_7000,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL3160_NVM_VERSION,
.host_interrupt_operation_mode = true,
.dccm_len = IWL3160_DCCM_LEN,
@@ -204,88 +177,52 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
{0},
};
-static const struct iwl_ht_params iwl7265_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-const struct iwl_cfg iwl3165_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 3165",
+const struct iwl_rf_cfg iwl3165_2ac_cfg = {
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7005D,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL3165_NVM_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
-const struct iwl_cfg iwl3168_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 3168",
+const struct iwl_rf_cfg iwl3168_2ac_cfg = {
.fw_name_pre = IWL3168_FW_PRE,
IWL_DEVICE_3008,
- .ht_params = &iwl7000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL3168_NVM_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
.nvm_type = IWL_NVM_SDP,
};
-const struct iwl_cfg iwl7265_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 7265",
- .fw_name_pre = IWL7265_FW_PRE,
- IWL_DEVICE_7005,
- .ht_params = &iwl7265_ht_params,
- .nvm_ver = IWL7265_NVM_VERSION,
- .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
- .dccm_len = IWL7265_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7265_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 7265",
- .fw_name_pre = IWL7265_FW_PRE,
- IWL_DEVICE_7005,
- .ht_params = &iwl7265_ht_params,
- .nvm_ver = IWL7265_NVM_VERSION,
- .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
- .dccm_len = IWL7265_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7265_n_cfg = {
- .name = "Intel(R) Wireless N 7265",
+const struct iwl_rf_cfg iwl7265_cfg = {
.fw_name_pre = IWL7265_FW_PRE,
IWL_DEVICE_7005,
- .ht_params = &iwl7265_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL7265_NVM_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
-const struct iwl_cfg iwl7265d_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 7265",
- .fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7005D,
- .ht_params = &iwl7265_ht_params,
- .nvm_ver = IWL7265D_NVM_VERSION,
- .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
- .dccm_len = IWL7265_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7265d_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 7265",
+const struct iwl_rf_cfg iwl7265d_cfg = {
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7005D,
- .ht_params = &iwl7265_ht_params,
- .nvm_ver = IWL7265D_NVM_VERSION,
- .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
- .dccm_len = IWL7265_DCCM_LEN,
-};
-
-const struct iwl_cfg iwl7265d_n_cfg = {
- .name = "Intel(R) Wireless N 7265",
- .fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7005D,
- .ht_params = &iwl7265_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL7265D_NVM_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index d09cf8d7dc01..b56574006ee0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2014, 2018-2020, 2023 Intel Corporation
+ * Copyright (C) 2014, 2018-2020, 2023, 2025 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -35,9 +35,7 @@
#define IWL8265_MODULE_FIRMWARE(api) \
IWL8265_FW_PRE "-" __stringify(api) ".ucode"
-#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
-
-static const struct iwl_base_params iwl8000_base_params = {
+static const struct iwl_family_base_params iwl8000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
@@ -47,12 +45,12 @@ static const struct iwl_base_params iwl8000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl8000_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ .nvm_hw_section_num = 10,
+ .features = NETIF_F_RXCSUM,
+ .smem_offset = IWL8260_SMEM_OFFSET,
+ .smem_len = IWL8260_SMEM_LEN,
+ .apmg_not_supported = true,
+ .min_umac_error_event_table = 0x800000,
};
static const struct iwl_tt_params iwl8000_tt_params = {
@@ -76,30 +74,20 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.support_tx_backoff = true,
};
+const struct iwl_mac_cfg iwl8000_mac_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_8000,
+ .base = &iwl8000_base,
+};
+
#define IWL_DEVICE_8000_COMMON \
- .trans.device_family = IWL_DEVICE_FAMILY_8000, \
- .trans.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .features = NETIF_F_RXCSUM, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
.dccm_len = IWL8260_DCCM_LEN, \
.dccm2_offset = IWL8260_DCCM2_OFFSET, \
.dccm2_len = IWL8260_DCCM2_LEN, \
- .smem_offset = IWL8260_SMEM_OFFSET, \
- .smem_len = IWL8260_SMEM_LEN, \
- .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
.thermal_params = &iwl8000_tt_params, \
- .apmg_not_supported = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000
-
-#define IWL_DEVICE_8000 \
- IWL_DEVICE_8000_COMMON, \
- .ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_min = IWL8000_UCODE_API_MIN \
+ .nvm_type = IWL_NVM_EXT
#define IWL_DEVICE_8260 \
IWL_DEVICE_8000_COMMON, \
@@ -111,47 +99,39 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.ucode_api_max = IWL8265_UCODE_API_MAX, \
.ucode_api_min = IWL8265_UCODE_API_MIN \
-const struct iwl_cfg iwl8260_2n_cfg = {
- .name = "Intel(R) Dual Band Wireless N 8260",
- .fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8260,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
-};
+const char iwl8260_2n_name[] = "Intel(R) Dual Band Wireless-N 8260";
+const char iwl8260_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8260";
+const char iwl8265_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8265";
+const char iwl8275_2ac_name[] = "Intel(R) Dual Band Wireless-AC 8275";
+const char iwl4165_2ac_name[] = "Intel(R) Dual Band Wireless-AC 4165";
-const struct iwl_cfg iwl8260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 8260",
+const char iwl_killer_1435i_name[] =
+ "Killer(R) Wireless-AC 1435i Wireless Network Adapter (8265D2W)";
+const char iwl_killer_1434_kix_name[] =
+ "Killer(R) Wireless-AC 1435-KIX Wireless Network Adapter (8265NGW)";
+
+const struct iwl_rf_cfg iwl8260_cfg = {
.fw_name_pre = IWL8000_FW_PRE,
IWL_DEVICE_8260,
- .ht_params = &iwl8000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL8000_NVM_VERSION,
};
-const struct iwl_cfg iwl8265_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 8265",
+const struct iwl_rf_cfg iwl8265_cfg = {
.fw_name_pre = IWL8265_FW_PRE,
IWL_DEVICE_8265,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
- .vht_mu_mimo_supported = true,
-};
-
-const struct iwl_cfg iwl8275_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 8275",
- .fw_name_pre = IWL8265_FW_PRE,
- IWL_DEVICE_8265,
- .ht_params = &iwl8000_ht_params,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+ },
.nvm_ver = IWL8000_NVM_VERSION,
.vht_mu_mimo_supported = true,
};
-const struct iwl_cfg iwl4165_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 4165",
- .fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8000,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
-};
-
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
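
The 8000-family hunks above show the shape of the whole refactor: constants shared by a device family move out of each per-device struct into a single iwl_family_base_params that the new iwl_mac_cfg points at, while the slimmed-down iwl_rf_cfg keeps only RF-side data and embeds ht_params by value instead of by pointer. A minimal sketch of the resulting layout, abridged to fields that actually appear in these hunks (the real definitions live in iwl-config.h, which this diff does not show):

#include <linux/types.h>

/* abridged illustration, not the kernel header */
struct iwl_family_base_params {
	unsigned int num_of_queues;	/* 31 here, 512 on AX210 and later */
	u32 smem_offset, smem_len;	/* moved in from each iwl_cfg */
	u8 nvm_hw_section_num;		/* moved in from IWL_DEVICE_* macros */
	u8 ucode_api_min, ucode_api_max; /* on 9000+ families these move here too */
};

struct iwl_mac_cfg {			/* was iwl_cfg_trans_params */
	enum iwl_device_family device_family;
	const struct iwl_family_base_params *base;	/* was base_params */
	bool mq_rx_supported, gen2, integrated;
};

struct iwl_rf_cfg {			/* was iwl_cfg */
	const char *fw_name_pre;
	struct iwl_ht_params ht_params;	/* embedded, no longer a pointer */
	u16 nvm_ver;
};
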
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 0130d9a9b78b..ac1fa291cf2f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2018-2021, 2023, 2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -15,14 +15,7 @@
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
-/* NVM versions */
-#define IWL9000_NVM_VERSION 0x0a1d
-
/* Memory offsets and lengths */
-#define IWL9000_DCCM_OFFSET 0x800000
-#define IWL9000_DCCM_LEN 0x18000
-#define IWL9000_DCCM2_OFFSET 0x880000
-#define IWL9000_DCCM2_LEN 0x8000
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
@@ -33,7 +26,7 @@
#define IWL9260_MODULE_FIRMWARE(api) \
IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl9000_base_params = {
+static const struct iwl_family_base_params iwl9000_base = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
@@ -43,150 +36,69 @@ static const struct iwl_base_params iwl9000_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl9000_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-static const struct iwl_tt_params iwl9000_tt_params = {
- .ct_kill_entry = 115,
- .ct_kill_exit = 93,
- .ct_kill_duration = 5,
- .dynamic_smps_entry = 111,
- .dynamic_smps_exit = 107,
- .tx_protection_entry = 112,
- .tx_protection_exit = 105,
- .tx_backoff = {
- {.temperature = 110, .backoff = 200},
- {.temperature = 111, .backoff = 600},
- {.temperature = 112, .backoff = 1200},
- {.temperature = 113, .backoff = 2000},
- {.temperature = 114, .backoff = 4000},
+ .smem_offset = IWL9000_SMEM_OFFSET,
+ .smem_len = IWL9000_SMEM_LEN,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x380,
+ .min_umac_error_event_table = 0x800000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 92 * 1024,
+ .nvm_hw_section_num = 10,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = MON_BUFF_WRPTR_VER2,
+ .mask = 0xffffffff,
+ },
+ .cycle_cnt = {
+ .addr = MON_BUFF_CYCLE_CNT_VER2,
+ .mask = 0xffffffff,
+ },
},
- .support_ct_kill = true,
- .support_dynamic_smps = true,
- .support_tx_protection = true,
- .support_tx_backoff = true,
+ .ucode_api_max = IWL9000_UCODE_API_MAX,
+ .ucode_api_min = IWL9000_UCODE_API_MIN,
};
-#define IWL_DEVICE_9000 \
- .ucode_api_max = IWL9000_UCODE_API_MAX, \
- .ucode_api_min = IWL9000_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL9000_DCCM_OFFSET, \
- .dccm_len = IWL9000_DCCM_LEN, \
- .dccm2_offset = IWL9000_DCCM2_OFFSET, \
- .dccm2_len = IWL9000_DCCM2_LEN, \
- .smem_offset = IWL9000_SMEM_OFFSET, \
- .smem_len = IWL9000_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .thermal_params = &iwl9000_tt_params, \
- .apmg_not_supported = true, \
- .num_rbds = 512, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x380, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0x800000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 92 * 1024, \
- .ht_params = &iwl9000_ht_params, \
- .nvm_ver = IWL9000_NVM_VERSION, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = MON_BUFF_WRPTR_VER2, \
- .mask = 0xffffffff, \
- }, \
- .cycle_cnt = { \
- .addr = MON_BUFF_CYCLE_CNT_VER2, \
- .mask = 0xffffffff, \
- }, \
- }
-
-const struct iwl_cfg_trans_params iwl9000_trans_cfg = {
+const struct iwl_mac_cfg iwl9000_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_9000,
- .base_params = &iwl9000_base_params,
+ .base = &iwl9000_base,
.mq_rx_supported = true,
- .rf_id = true,
};
-const struct iwl_cfg_trans_params iwl9560_trans_cfg = {
+const struct iwl_mac_cfg iwl9560_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_9000,
- .base_params = &iwl9000_base_params,
+ .base = &iwl9000_base,
.mq_rx_supported = true,
- .rf_id = true,
.integrated = true,
.xtal_latency = 650,
};
-const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg = {
+const struct iwl_mac_cfg iwl9560_long_latency_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_9000,
- .base_params = &iwl9000_base_params,
+ .base = &iwl9000_base,
.mq_rx_supported = true,
- .rf_id = true,
.integrated = true,
.xtal_latency = 2820,
};
-const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = {
+const struct iwl_mac_cfg iwl9560_shared_clk_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_9000,
- .base_params = &iwl9000_base_params,
+ .base = &iwl9000_base,
.mq_rx_supported = true,
- .rf_id = true,
.integrated = true,
.xtal_latency = 670,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
-const char iwl9162_name[] = "Intel(R) Wireless-AC 9162";
-const char iwl9260_name[] = "Intel(R) Wireless-AC 9260";
-const char iwl9260_1_name[] = "Intel(R) Wireless-AC 9260-1";
-const char iwl9270_name[] = "Intel(R) Wireless-AC 9270";
-const char iwl9461_name[] = "Intel(R) Wireless-AC 9461";
-const char iwl9462_name[] = "Intel(R) Wireless-AC 9462";
-const char iwl9560_name[] = "Intel(R) Wireless-AC 9560";
-const char iwl9162_160_name[] = "Intel(R) Wireless-AC 9162 160MHz";
-const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
-const char iwl9270_160_name[] = "Intel(R) Wireless-AC 9270 160MHz";
-const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz";
-const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz";
-const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
-
-const char iwl9260_killer_1550_name[] =
- "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
-const char iwl9560_killer_1550i_name[] =
- "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
-const char iwl9560_killer_1550i_160_name[] =
- "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
-const char iwl9560_killer_1550s_name[] =
- "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
-const char iwl9560_killer_1550s_160_name[] =
- "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";
-
-const struct iwl_cfg iwl9260_2ac_cfg = {
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_soc = {
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
-};
-
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index e87b57b9e2c0..3bf9fdbe01c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -15,14 +15,7 @@
/* Lowest firmware API version supported */
#define IWL_AX210_UCODE_API_MIN 77
-/* NVM versions */
-#define IWL_AX210_NVM_VERSION 0x0a1d
-
/* Memory offsets and lengths */
-#define IWL_AX210_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_AX210_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_AX210_DCCM2_OFFSET 0x880000
-#define IWL_AX210_DCCM2_LEN 0x8000
#define IWL_AX210_SMEM_OFFSET 0x400000
#define IWL_AX210_SMEM_LEN 0xD0000
@@ -47,8 +40,7 @@
#define IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_MA_B_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl_ax210_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_ax210_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -57,74 +49,60 @@ static const struct iwl_base_params iwl_ax210_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .smem_offset = IWL_AX210_SMEM_OFFSET,
+ .smem_len = IWL_AX210_SMEM_LEN,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x380,
+ .min_umac_error_event_table = 0x400000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .min_txq_size = 128,
+ .gp2_reg_addr = 0xd02c68,
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,
+ },
+ .cycle_cnt = {
+ .addr = DBGC_DBGBUF_WRAP_AROUND,
+ .mask = 0xffffffff,
+ },
+ .cur_frag = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,
+ },
+ },
+ .ucode_api_min = IWL_AX210_UCODE_API_MIN,
+ .ucode_api_max = IWL_AX210_UCODE_API_MAX,
};
-#define IWL_DEVICE_AX210_COMMON \
- .ucode_api_min = IWL_AX210_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_AX210_DCCM_OFFSET, \
- .dccm_len = IWL_AX210_DCCM_LEN, \
- .dccm2_offset = IWL_AX210_DCCM2_OFFSET, \
- .dccm2_len = IWL_AX210_DCCM2_LEN, \
- .smem_offset = IWL_AX210_SMEM_OFFSET, \
- .smem_len = IWL_AX210_SMEM_LEN, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x380, \
- .ht_params = &iwl_22000_ht_params, \
- .nvm_ver = IWL_AX210_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0x400000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_AX210 \
- IWL_DEVICE_AX210_COMMON, \
- .ucode_api_max = IWL_AX210_UCODE_API_MAX, \
- .trans.umac_prph_offset = 0x300000, \
- .trans.device_family = IWL_DEVICE_FAMILY_AX210, \
- .trans.base_params = &iwl_ax210_base_params, \
- .min_txq_size = 128, \
- .gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = DBGC_DBGBUF_WRAP_AROUND, \
- .mask = 0xffffffff, \
- }, \
- .cur_frag = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
- }, \
- }
+const struct iwl_mac_cfg iwl_ty_mac_cfg = {
+ .mq_rx_supported = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base = &iwl_ax210_base,
+ .umac_prph_offset = 0x300000,
+ /* TODO: the following values need to be checked */
+ .xtal_latency = 500,
+};
-const struct iwl_cfg_trans_params iwl_so_trans_cfg = {
+const struct iwl_mac_cfg iwl_so_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
- .base_params = &iwl_ax210_base_params,
+ .base = &iwl_ax210_base,
.umac_prph_offset = 0x300000,
.integrated = true,
/* TODO: the following values need to be checked */
@@ -132,12 +110,11 @@ const struct iwl_cfg_trans_params iwl_so_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
-const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
+const struct iwl_mac_cfg iwl_so_long_latency_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
- .base_params = &iwl_ax210_base_params,
+ .base = &iwl_ax210_base,
.umac_prph_offset = 0x300000,
.integrated = true,
.low_latency_xtal = true,
@@ -145,12 +122,11 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
+const struct iwl_mac_cfg iwl_so_long_latency_imr_mac_cfg = {
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.device_family = IWL_DEVICE_FAMILY_AX210,
- .base_params = &iwl_ax210_base_params,
+ .base = &iwl_ax210_base,
.umac_prph_offset = 0x300000,
.integrated = true,
.low_latency_xtal = true,
@@ -159,107 +135,15 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = {
.imr_enabled = true,
};
-/*
- * If the device doesn't support HE, no need to have that many buffers.
- * AX210 devices can split multiple frames into a single RB, so fewer are
- * needed; AX210 cannot (but use smaller RBs by default) - these sizes
- * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with
- * additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_NON_HE 512
-#define IWL_NUM_RBDS_AX210_HE 4096
-
-const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
+const struct iwl_mac_cfg iwl_ma_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_AX210,
- .base_params = &iwl_ax210_base_params,
+ .base = &iwl_ax210_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000
};
-const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
-const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
-const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
-
-const char iwl_ax210_killer_1675w_name[] =
- "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)";
-const char iwl_ax210_killer_1675x_name[] =
- "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)";
-const char iwl_ax211_killer_1675s_name[] =
- "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)";
-const char iwl_ax211_killer_1675i_name[] =
- "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)";
-const char iwl_ax411_killer_1690s_name[] =
- "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)";
-const char iwl_ax411_killer_1690i_name[] =
- "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)";
-
-const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_SO_A_JF_B_FW_PRE,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
- .name = iwl_ax211_name,
- .fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
-const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
- .name = iwl_ax211_name,
- .fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
- .trans.xtal_latency = 12000,
- .trans.low_latency_xtal = true,
-};
-
-const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX210 160MHz",
- .fw_name_pre = IWL_TY_A_GF_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
-const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
- .name = iwl_ax411_name,
- .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
-const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
- .name = iwl_ax411_name,
- .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
- .trans.xtal_latency = 12000,
- .trans.low_latency_xtal = true,
-};
-
-const struct iwl_cfg iwl_cfg_ma = {
- .fw_name_mac = "ma",
- .uhb_supported = true,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
-const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
- .fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
- IWL_DEVICE_AX210,
- .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
IWL_FW_AND_PNVM(IWL_SO_A_GF_A_FW_PRE, IWL_AX210_UCODE_API_MAX);
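
The per-device iwl_cfg entries deleted here (iwlax210_2ax_cfg_so_jf_b0, iwlax211_2ax_cfg_so_gf_a0 and friends) are not simply dropped: their RF side reappears in the new cfg/rf-gf.c and cfg/rf-jf.c files added further down, their MAC side is covered by the iwl_*_mac_cfg entries above, and num_rbds now travels with the RF config (IWL_NUM_RBDS_HE / IWL_NUM_RBDS_NON_HE replacing the per-file IWL_NUM_RBDS_AX210_* defines). Roughly, a supported device is now described by pairing the two halves plus a printable name; a purely hypothetical sketch of that pairing (the real matching lives in the PCI device tables, which this excerpt does not touch):

/* hypothetical illustration of the MAC/RF split, not a kernel struct */
struct example_device_entry {
	const struct iwl_mac_cfg *mac;	/* e.g. &iwl_so_mac_cfg */
	const struct iwl_rf_cfg *rf;	/* e.g. &iwl_rf_gf */
	const char *name;		/* e.g. iwl_ax211_name */
};
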
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index f055255a7c93..05e45fff8b36 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,19 +10,12 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 98
+#define IWL_BZ_UCODE_API_MAX 99
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 93
-/* NVM versions */
-#define IWL_BZ_NVM_VERSION 0x0a1d
-
/* Memory offsets and lengths */
-#define IWL_BZ_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_BZ_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_BZ_DCCM2_OFFSET 0x880000
-#define IWL_BZ_DCCM2_LEN 0x8000
#define IWL_BZ_SMEM_OFFSET 0x400000
#define IWL_BZ_SMEM_LEN 0xD0000
@@ -38,13 +31,7 @@
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-#if !IS_ENABLED(CONFIG_IWLMVM)
-const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
-const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
-#endif
-
-static const struct iwl_base_params iwl_bz_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_bz_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -53,91 +40,55 @@ static const struct iwl_base_params iwl_bz_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .smem_offset = IWL_BZ_SMEM_OFFSET,
+ .smem_len = IWL_BZ_SMEM_LEN,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x30,
+ .min_umac_error_event_table = 0xD0000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .min_txq_size = 128,
+ .gp2_reg_addr = 0xd02c68,
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,
+ },
+ .cycle_cnt = {
+ .addr = DBGC_DBGBUF_WRAP_AROUND,
+ .mask = 0xffffffff,
+ },
+ .cur_frag = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,
+ },
+ },
+ .mon_dbgi_regs = {
+ .write_ptr = {
+ .addr = DBGI_SRAM_FIFO_POINTERS,
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,
+ },
+ },
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .ucode_api_max = IWL_BZ_UCODE_API_MAX,
+ .ucode_api_min = IWL_BZ_UCODE_API_MIN,
};
-const struct iwl_ht_params iwl_bz_ht_params = {
- .stbc = true,
- .ldpc = true,
- .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) |
- BIT(NL80211_BAND_6GHZ),
-};
-
-#define IWL_DEVICE_BZ_COMMON \
- .ucode_api_max = IWL_BZ_UCODE_API_MAX, \
- .ucode_api_min = IWL_BZ_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_BZ_DCCM_OFFSET, \
- .dccm_len = IWL_BZ_DCCM_LEN, \
- .dccm2_offset = IWL_BZ_DCCM2_OFFSET, \
- .dccm2_len = IWL_BZ_DCCM2_LEN, \
- .smem_offset = IWL_BZ_SMEM_OFFSET, \
- .smem_len = IWL_BZ_SMEM_LEN, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x30, \
- .nvm_ver = IWL_BZ_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0xD0000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }, \
- .trans.umac_prph_offset = 0x300000, \
- .trans.device_family = IWL_DEVICE_FAMILY_BZ, \
- .trans.base_params = &iwl_bz_base_params, \
- .min_txq_size = 128, \
- .gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = DBGC_DBGBUF_WRAP_AROUND, \
- .mask = 0xffffffff, \
- }, \
- .cur_frag = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
- }, \
- }, \
- .mon_dbgi_regs = { \
- .write_ptr = { \
- .addr = DBGI_SRAM_FIFO_POINTERS, \
- .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_BZ \
- IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_bz_ht_params
-
-/*
- * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
- * A-MPDU, with additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_BZ_EHT (512 * 16)
-
-const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
+const struct iwl_mac_cfg iwl_bz_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_BZ,
- .base_params = &iwl_bz_base_params,
+ .base = &iwl_bz_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000,
@@ -146,38 +97,16 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
+const struct iwl_mac_cfg iwl_gl_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_BZ,
- .base_params = &iwl_bz_base_params,
+ .base = &iwl_bz_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.umac_prph_offset = 0x300000,
.xtal_latency = 12000,
.low_latency_xtal = true,
};
-const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
-const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
-const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
-const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
-
-const struct iwl_cfg iwl_cfg_bz = {
- .fw_name_mac = "bz",
- .uhb_supported = true,
- IWL_DEVICE_BZ,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_BZ_EHT,
-};
-
-const struct iwl_cfg iwl_cfg_gl = {
- .fw_name_mac = "gl",
- .uhb_supported = true,
- IWL_DEVICE_BZ,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_BZ_EHT,
-};
-
MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_BZ_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_BZ_UCODE_API_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index 282b9b846c3a..45e55cef42ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -9,35 +9,21 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_DR_UCODE_API_MAX 98
+#define IWL_DR_UCODE_API_MAX 99
/* Lowest firmware API version supported */
-#define IWL_DR_UCODE_API_MIN 96
-
-/* NVM versions */
-#define IWL_DR_NVM_VERSION 0x0a1d
+#define IWL_DR_UCODE_API_MIN 97
/* Memory offsets and lengths */
-#define IWL_DR_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_DR_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_DR_DCCM2_OFFSET 0x880000
-#define IWL_DR_DCCM2_LEN 0x8000
#define IWL_DR_SMEM_OFFSET 0x400000
#define IWL_DR_SMEM_LEN 0xD0000
#define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0"
-#define IWL_BR_A_PET_A_FW_PRE "iwlwifi-br-a0-petc-a0"
-#define IWL_BR_A_PE_A_FW_PRE "iwlwifi-br-a0-pe-a0"
#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(api) \
- IWL_BR_A_PET_A_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(api) \
- IWL_BR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl_dr_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_dr_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -46,87 +32,55 @@ static const struct iwl_base_params iwl_dr_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .smem_offset = IWL_DR_SMEM_OFFSET,
+ .smem_len = IWL_DR_SMEM_LEN,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x30,
+ .min_umac_error_event_table = 0xD0000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .min_txq_size = 128,
+ .gp2_reg_addr = 0xd02c68,
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,
+ },
+ .cycle_cnt = {
+ .addr = DBGC_DBGBUF_WRAP_AROUND,
+ .mask = 0xffffffff,
+ },
+ .cur_frag = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,
+ },
+ },
+ .mon_dbgi_regs = {
+ .write_ptr = {
+ .addr = DBGI_SRAM_FIFO_POINTERS,
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,
+ },
+ },
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .ucode_api_max = IWL_DR_UCODE_API_MAX,
+ .ucode_api_min = IWL_DR_UCODE_API_MIN,
};
-#define IWL_DEVICE_DR_COMMON \
- .ucode_api_max = IWL_DR_UCODE_API_MAX, \
- .ucode_api_min = IWL_DR_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_DR_DCCM_OFFSET, \
- .dccm_len = IWL_DR_DCCM_LEN, \
- .dccm2_offset = IWL_DR_DCCM2_OFFSET, \
- .dccm2_len = IWL_DR_DCCM2_LEN, \
- .smem_offset = IWL_DR_SMEM_OFFSET, \
- .smem_len = IWL_DR_SMEM_LEN, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x30, \
- .nvm_ver = IWL_DR_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0xD0000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }, \
- .trans.umac_prph_offset = 0x300000, \
- .trans.device_family = IWL_DEVICE_FAMILY_DR, \
- .trans.base_params = &iwl_dr_base_params, \
- .min_txq_size = 128, \
- .gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = DBGC_DBGBUF_WRAP_AROUND, \
- .mask = 0xffffffff, \
- }, \
- .cur_frag = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
- }, \
- }, \
- .mon_dbgi_regs = { \
- .write_ptr = { \
- .addr = DBGI_SRAM_FIFO_POINTERS, \
- .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_DR \
- IWL_DEVICE_DR_COMMON, \
- .uhb_supported = true, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .num_rbds = IWL_NUM_RBDS_DR_EHT, \
- .ht_params = &iwl_bz_ht_params
-
-/*
- * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
- * A-MPDU, with additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_DR_EHT (512 * 16)
-
-const struct iwl_cfg_trans_params iwl_dr_trans_cfg = {
+const struct iwl_mac_cfg iwl_dr_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_DR,
- .base_params = &iwl_dr_base_params,
+ .base = &iwl_dr_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000,
@@ -135,31 +89,5 @@ const struct iwl_cfg_trans_params iwl_dr_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const char iwl_dr_name[] = "Intel(R) TBD Dr device";
-
-const struct iwl_cfg iwl_cfg_dr = {
- .fw_name_mac = "dr",
- IWL_DEVICE_DR,
-};
-
-const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
- .device_family = IWL_DEVICE_FAMILY_DR,
- .base_params = &iwl_dr_base_params,
- .mq_rx_supported = true,
- .rf_id = true,
- .gen2 = true,
- .umac_prph_offset = 0x300000,
- .xtal_latency = 12000,
- .low_latency_xtal = true,
-};
-
-const char iwl_br_name[] = "Intel(R) TBD Br device";
-
-const struct iwl_cfg iwl_cfg_br = {
- .fw_name_mac = "br",
- IWL_DEVICE_DR,
-};
-
MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BR_A_PET_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_BR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
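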
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c
new file mode 100644
index 000000000000..456a666c8dfd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* NVM versions */
+#define IWL_FM_NVM_VERSION 0x0a1d
+
+#define IWL_DEVICE_FM \
+ .ht_params = { \
+ .stbc = true, \
+ .ldpc = true, \
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | \
+ BIT(NL80211_BAND_5GHZ), \
+ }, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .non_shared_ant = ANT_B, \
+ .vht_mu_mimo_supported = true, \
+ .uhb_supported = true, \
+ .num_rbds = IWL_NUM_RBDS_EHT, \
+ .nvm_ver = IWL_FM_NVM_VERSION, \
+ .nvm_type = IWL_NVM_EXT
+
+const struct iwl_rf_cfg iwl_rf_fm = {
+ IWL_DEVICE_FM,
+};
+
+const struct iwl_rf_cfg iwl_rf_fm_160mhz = {
+ IWL_DEVICE_FM,
+ .bw_limit = 160,
+};
+
+const char iwl_killer_be1750s_name[] =
+ "Killer(R) Wi-Fi 7 BE1750s 320MHz Wireless Network Adapter (BE201D2W)";
+const char iwl_killer_be1750i_name[] =
+ "Killer(R) Wi-Fi 7 BE1750i 320MHz Wireless Network Adapter (BE201NGW)";
+const char iwl_killer_be1750w_name[] =
+ "Killer(TM) Wi-Fi 7 BE1750w 320MHz Wireless Network Adapter (BE200D2W)";
+const char iwl_killer_be1750x_name[] =
+ "Killer(TM) Wi-Fi 7 BE1750x 320MHz Wireless Network Adapter (BE200NGW)";
+const char iwl_killer_be1790s_name[] =
+ "Killer(R) Wi-Fi 7 BE1790s 320MHz Wireless Network Adapter (BE401D2W)";
+const char iwl_killer_be1790i_name[] =
+ "Killer(R) Wi-Fi 7 BE1790i 320MHz Wireless Network Adapter (BE401NGW)";
+
+const char iwl_be201_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
+const char iwl_be200_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
+const char iwl_be202_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
+const char iwl_be401_name[] = "Intel(R) Wi-Fi 7 BE401 320MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
new file mode 100644
index 000000000000..f55c286e83be
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* NVM versions */
+#define IWL_GF_NVM_VERSION 0x0a1d
+
+const struct iwl_rf_cfg iwl_rf_gf = {
+ .uhb_supported = true,
+ .led_mode = IWL_LED_RF_STATE,
+ .non_shared_ant = ANT_B,
+ .vht_mu_mimo_supported = true,
+ .ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ },
+ .nvm_ver = IWL_GF_NVM_VERSION,
+ .nvm_type = IWL_NVM_EXT,
+ .num_rbds = IWL_NUM_RBDS_HE,
+};
+
+const char iwl_ax210_killer_1675w_name[] =
+ "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)";
+const char iwl_ax210_killer_1675x_name[] =
+ "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)";
+const char iwl_ax211_killer_1675s_name[] =
+ "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211D2W)";
+const char iwl_ax211_killer_1675i_name[] =
+ "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)";
+const char iwl_ax411_killer_1690s_name[] =
+ "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)";
+const char iwl_ax411_killer_1690i_name[] =
+ "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)";
+
+const char iwl_ax210_name[] = "Intel(R) Wi-Fi 6E AX210 160MHz";
+const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
+const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
new file mode 100644
index 000000000000..db02664e3917
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* NVM versions */
+#define IWL_HR_NVM_VERSION 0x0a1d
+
+#define IWL_DEVICE_HR \
+ .led_mode = IWL_LED_RF_STATE, \
+ .non_shared_ant = ANT_B, \
+ .vht_mu_mimo_supported = true, \
+ .ht_params = { \
+ .stbc = true, \
+ .ldpc = true, \
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | \
+ BIT(NL80211_BAND_5GHZ), \
+ }, \
+ .num_rbds = IWL_NUM_RBDS_HE, \
+ .nvm_ver = IWL_HR_NVM_VERSION, \
+ .nvm_type = IWL_NVM_EXT
+
+const struct iwl_rf_cfg iwl_rf_hr1 = {
+ IWL_DEVICE_HR,
+ .tx_with_siso_diversity = true,
+};
+
+const struct iwl_rf_cfg iwl_rf_hr = {
+ IWL_DEVICE_HR,
+};
+
+const struct iwl_rf_cfg iwl_rf_hr_80mhz = {
+ IWL_DEVICE_HR,
+ .bw_limit = 80,
+};
+
+const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
+const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
+const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c
new file mode 100644
index 000000000000..467eaeae6deb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2021, 2023, 2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* NVM versions */
+#define IWL_JF_NVM_VERSION 0x0a1d
+
+/* Memory offsets and lengths */
+#define IWL9000_DCCM_OFFSET 0x800000
+#define IWL9000_DCCM_LEN 0x18000
+#define IWL9000_DCCM2_OFFSET 0x880000
+#define IWL9000_DCCM2_LEN 0x8000
+
+static const struct iwl_tt_params iwl_jf_tt_params = {
+ .ct_kill_entry = 115,
+ .ct_kill_exit = 93,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 111,
+ .dynamic_smps_exit = 107,
+ .tx_protection_entry = 112,
+ .tx_protection_exit = 105,
+ .tx_backoff = {
+ {.temperature = 110, .backoff = 200},
+ {.temperature = 111, .backoff = 600},
+ {.temperature = 112, .backoff = 1200},
+ {.temperature = 113, .backoff = 2000},
+ {.temperature = 114, .backoff = 4000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
+/* these values are ignored if not with Pu/Th MAC firmware, due to offload */
+#define IWL_DEVICE_JF_PU \
+ .dccm_offset = IWL9000_DCCM_OFFSET, \
+ .dccm_len = IWL9000_DCCM_LEN, \
+ .dccm2_offset = IWL9000_DCCM2_OFFSET, \
+ .dccm2_len = IWL9000_DCCM2_LEN, \
+ .thermal_params = &iwl_jf_tt_params
+
+#define IWL_DEVICE_JF \
+ IWL_DEVICE_JF_PU, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .non_shared_ant = ANT_B, \
+ .num_rbds = IWL_NUM_RBDS_NON_HE, \
+ .vht_mu_mimo_supported = true, \
+ .ht_params = { \
+ .stbc = true, \
+ .ldpc = true, \
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | \
+ BIT(NL80211_BAND_5GHZ), \
+ }, \
+ .nvm_ver = IWL_JF_NVM_VERSION, \
+ .nvm_type = IWL_NVM_EXT
+
+const struct iwl_rf_cfg iwl_rf_jf = {
+ IWL_DEVICE_JF,
+};
+
+const struct iwl_rf_cfg iwl_rf_jf_80mhz = {
+ IWL_DEVICE_JF,
+ .bw_limit = 80,
+};
+
+const char iwl9260_name[] = "Intel(R) Wireless-AC 9260";
+const char iwl9461_name[] = "Intel(R) Wireless-AC 9461";
+const char iwl9462_name[] = "Intel(R) Wireless-AC 9462";
+const char iwl9560_name[] = "Intel(R) Wireless-AC 9560";
+const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
+const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz";
+const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz";
+const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
+
+const char iwl9260_killer_1550_name[] =
+ "Killer(R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
+const char iwl9560_killer_1550i_name[] =
+ "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
+const char iwl9560_killer_1550s_name[] =
+ "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c
new file mode 100644
index 000000000000..483f21659eff
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* currently iwl_rf_wh/iwl_rf_wh_160mhz are just defines for the FM ones */
+
+const char iwl_killer_bn1850w2_name[] =
+ "Killer(R) Wi-Fi 8 BN1850w2 320MHz Wireless Network Adapter (BN201.D2W)";
+const char iwl_killer_bn1850i_name[] =
+ "Killer(R) Wi-Fi 8 BN1850i 320MHz Wireless Network Adapter (BN201.NGW)";
+
+const char iwl_bn201_name[] = "Intel(R) Wi-Fi 8 BN201";
+const char iwl_be221_name[] = "Intel(R) Wi-Fi 7 BE221";
+const char iwl_be223_name[] = "Intel(R) Wi-Fi 7 BE223";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c
new file mode 100644
index 000000000000..97735175cb0e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include "iwl-config.h"
+
+/* currently iwl_rf_wh/iwl_rf_wh_160mhz are just defines for the FM ones */
+
+const char iwl_killer_be1775s_name[] =
+ "Killer(R) Wi-Fi 7 BE1775s 320MHz Wireless Network Adapter (BE211D2W)";
+const char iwl_killer_be1775i_name[] =
+ "Killer(R) Wi-Fi 7 BE1775i 320MHz Wireless Network Adapter (BE211NGW)";
+
+const char iwl_be211_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
+const char iwl_be213_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz";
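
Per the identical comments in these two new files, only the name strings are actually new for WH and PE; the RF configs themselves reuse the FM ones. Presumably the aliasing is a pair of defines along these lines (assumed form, since iwl-config.h is outside this diff):

/* assumed aliasing, per the "just defines for the FM ones" comments */
#define iwl_rf_wh		iwl_rf_fm
#define iwl_rf_wh_160mhz	iwl_rf_fm_160mhz
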
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 59af36960f9c..b2e4d4035296 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,19 +10,15 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 98
+#define IWL_SC_UCODE_API_MAX 99
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 93
+#define IWL_SC_UCODE_API_MIN 97
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
/* Memory offsets and lengths */
-#define IWL_SC_DCCM_OFFSET 0x800000 /* LMAC1 */
-#define IWL_SC_DCCM_LEN 0x10000 /* LMAC1 */
-#define IWL_SC_DCCM2_OFFSET 0x880000
-#define IWL_SC_DCCM2_LEN 0x8000
#define IWL_SC_SMEM_OFFSET 0x400000
#define IWL_SC_SMEM_LEN 0xD0000
@@ -43,8 +39,7 @@
#define IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
-static const struct iwl_base_params iwl_sc_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
+static const struct iwl_family_base_params iwl_sc_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -53,87 +48,55 @@ static const struct iwl_base_params iwl_sc_base_params = {
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
+ .smem_offset = IWL_SC_SMEM_OFFSET,
+ .smem_len = IWL_SC_SMEM_LEN,
+ .apmg_not_supported = true,
+ .mac_addr_from_csr = 0x30,
+ .min_umac_error_event_table = 0xD0000,
+ .d3_debug_data_base_addr = 0x401000,
+ .d3_debug_data_length = 60 * 1024,
+ .mon_smem_regs = {
+ .write_ptr = {
+ .addr = LDBG_M2S_BUF_WPTR,
+ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK,
+ },
+ .cycle_cnt = {
+ .addr = LDBG_M2S_BUF_WRAP_CNT,
+ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK,
+ },
+ },
+ .min_txq_size = 128,
+ .gp2_reg_addr = 0xd02c68,
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT,
+ .mon_dram_regs = {
+ .write_ptr = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK,
+ },
+ .cycle_cnt = {
+ .addr = DBGC_DBGBUF_WRAP_AROUND,
+ .mask = 0xffffffff,
+ },
+ .cur_frag = {
+ .addr = DBGC_CUR_DBGBUF_STATUS,
+ .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK,
+ },
+ },
+ .mon_dbgi_regs = {
+ .write_ptr = {
+ .addr = DBGI_SRAM_FIFO_POINTERS,
+ .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK,
+ },
+ },
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .ucode_api_max = IWL_SC_UCODE_API_MAX,
+ .ucode_api_min = IWL_SC_UCODE_API_MIN,
};
-#define IWL_DEVICE_BZ_COMMON \
- .ucode_api_max = IWL_SC_UCODE_API_MAX, \
- .ucode_api_min = IWL_SC_UCODE_API_MIN, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = 10, \
- .non_shared_ant = ANT_B, \
- .dccm_offset = IWL_SC_DCCM_OFFSET, \
- .dccm_len = IWL_SC_DCCM_LEN, \
- .dccm2_offset = IWL_SC_DCCM2_OFFSET, \
- .dccm2_len = IWL_SC_DCCM2_LEN, \
- .smem_offset = IWL_SC_SMEM_OFFSET, \
- .smem_len = IWL_SC_SMEM_LEN, \
- .apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
- .vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = 0x30, \
- .nvm_ver = IWL_SC_NVM_VERSION, \
- .trans.rf_id = true, \
- .trans.gen2 = true, \
- .nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true, \
- .min_umac_error_event_table = 0xD0000, \
- .d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024, \
- .mon_smem_regs = { \
- .write_ptr = { \
- .addr = LDBG_M2S_BUF_WPTR, \
- .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = LDBG_M2S_BUF_WRAP_CNT, \
- .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
- }, \
- }, \
- .trans.umac_prph_offset = 0x300000, \
- .trans.device_family = IWL_DEVICE_FAMILY_SC, \
- .trans.base_params = &iwl_sc_base_params, \
- .min_txq_size = 128, \
- .gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
- .mon_dram_regs = { \
- .write_ptr = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
- }, \
- .cycle_cnt = { \
- .addr = DBGC_DBGBUF_WRAP_AROUND, \
- .mask = 0xffffffff, \
- }, \
- .cur_frag = { \
- .addr = DBGC_CUR_DBGBUF_STATUS, \
- .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
- }, \
- }, \
- .mon_dbgi_regs = { \
- .write_ptr = { \
- .addr = DBGI_SRAM_FIFO_POINTERS, \
- .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
- }, \
- }
-
-#define IWL_DEVICE_SC \
- IWL_DEVICE_BZ_COMMON, \
- .uhb_supported = true, \
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
- .num_rbds = IWL_NUM_RBDS_SC_EHT, \
- .ht_params = &iwl_bz_ht_params
-
-/*
- * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
- * A-MPDU, with additional overhead to account for processing time.
- */
-#define IWL_NUM_RBDS_SC_EHT (512 * 16)
-
-const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
+const struct iwl_mac_cfg iwl_sc_mac_cfg = {
.device_family = IWL_DEVICE_FAMILY_SC,
- .base_params = &iwl_sc_base_params,
+ .base = &iwl_sc_base,
.mq_rx_supported = true,
- .rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000,
@@ -142,21 +105,6 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const struct iwl_cfg iwl_cfg_sc = {
- .fw_name_mac = "sc",
- IWL_DEVICE_SC,
-};
-
-const struct iwl_cfg iwl_cfg_sc2 = {
- .fw_name_mac = "sc2",
- IWL_DEVICE_SC,
-};
-
-const struct iwl_cfg iwl_cfg_sc2f = {
- .fw_name_mac = "sc2f",
- IWL_DEVICE_SC,
-};
-
IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
MODULE_FIRMWARE(IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index a13add556a7b..1ebc7effcc2a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2021, 2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2021, 2024-2025 Intel Corporation
*/
#ifndef __iwl_agn_h__
#define __iwl_agn_h__
@@ -399,7 +399,7 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
* later with iwl_free_nvm_data().
*/
struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const u8 *eeprom, size_t eeprom_size);
int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 2ed4b6e798ab..ec94c43ba28c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2025 Intel Corporation
*****************************************************************************/
#include <linux/slab.h>
@@ -2097,7 +2097,8 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
char buf[40];
const size_t bufsz = sizeof(buf);
- if (priv->cfg->ht_params)
+ /* HT devices also have at least one HT40 band */
+ if (priv->cfg->ht_params.ht40_bands)
pos += scnprintf(buf + pos, bufsz - pos,
"use %s for aggregation\n",
(priv->hw_params.use_rts_for_aggregation) ?
@@ -2117,7 +2118,8 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
int buf_size;
int rts;
- if (!priv->cfg->ht_params)
+ /* HT devices also have at least one HT40 band */
+ if (!priv->cfg->ht_params.ht40_bands)
return -EINVAL;
memset(buf, 0, sizeof(buf));
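
The debugfs change above falls out of embedding ht_params: a by-value member cannot be NULL-tested the way the old pointer could, so the test keys off ht40_bands instead, relying on the invariant stated in the added comments that every HT device enables at least one HT40 band. A one-function sketch of the new idiom, assuming the iwl_rf_cfg layout from this patch:

/* sketch: "does this RF config support HT at all?" */
static bool iwl_cfg_has_ht(const struct iwl_rf_cfg *cfg)
{
	/*
	 * ht_params is embedded now and can never be NULL; a non-zero
	 * HT40 band bitmap stands in for "HT supported", per the
	 * comment added in the hunks above.
	 */
	return cfg->ht_params.ht40_bands != 0;
}
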
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 4ac8b862ad41..25b24820466d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -2,6 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014, 2020, 2023 Intel Corporation. All rights reserved.
+ * Copyright (C) 2025 Intel Corporation
*****************************************************************************/
/*
* Please use this file (dev.h) for driver implementation definitions.
@@ -627,7 +628,7 @@ struct iwl_priv {
struct iwl_trans *trans;
struct device *dev; /* for debug prints only */
- const struct iwl_cfg *cfg;
+ const struct iwl_rf_cfg *cfg;
const struct iwl_fw *fw;
const struct iwl_dvm_cfg *lib;
unsigned long status;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index 48a8349680fc..3447ae0b160a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2025 Intel Corporation
*****************************************************************************/
#include <linux/units.h>
@@ -481,7 +481,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
- switch (priv->trans->trans_cfg->device_family) {
+ switch (priv->trans->mac_cfg->device_family) {
case IWL_DEVICE_FAMILY_6005:
case IWL_DEVICE_FAMILY_6030:
case IWL_DEVICE_FAMILY_6000:
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index cdc05f7e75a6..2423125e5284 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2019, 2021, 2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021, 2024-2025 Intel Corporation
*/
#include <linux/types.h>
#include <linux/slab.h>
@@ -151,7 +151,7 @@ static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
{
if (WARN_ON(offset + sizeof(u16) > eeprom_size))
return 0;
- return le16_to_cpup((__le16 *)(eeprom + offset));
+ return le16_to_cpup((const __le16 *)(eeprom + offset));
}
static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
@@ -204,8 +204,8 @@ static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
return (address & ADDRESS_MSK) + (offset << 1);
}
-static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
- u32 offset)
+static const void *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
+ u32 offset)
{
u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
@@ -218,10 +218,9 @@ static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
struct iwl_nvm_data *data)
{
- struct iwl_eeprom_calib_hdr *hdr;
+ const struct iwl_eeprom_calib_hdr *hdr;
- hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_CALIB_ALL);
+ hdr = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_CALIB_ALL);
if (!hdr)
return -ENODATA;
data->calib_version = hdr->version;
@@ -295,7 +294,7 @@ struct iwl_eeprom_enhanced_txpwr {
} __packed;
static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
- struct iwl_eeprom_enhanced_txpwr *txp)
+ const struct iwl_eeprom_enhanced_txpwr *txp)
{
s8 result = 0; /* (.5 dBm) */
@@ -329,7 +328,7 @@ static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
static void
iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
- struct iwl_eeprom_enhanced_txpwr *txp,
+ const struct iwl_eeprom_enhanced_txpwr *txp,
int n_channels, s8 max_txpower_avg)
{
int ch_idx;
@@ -360,20 +359,18 @@ static void iwl_eeprom_enhanced_txpower(struct device *dev,
const u8 *eeprom, size_t eeprom_size,
int n_channels)
{
- struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+ const struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
int idx, entries;
- __le16 *txp_len;
+ const __le16 *txp_len;
s8 max_txp_avg_halfdbm;
BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
/* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_TXP_SZ_OFFS);
+ txp_len = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_SZ_OFFS);
entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
- txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_TXP_OFFS);
+ txp_array = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_TXP_OFFS);
for (idx = 0; idx < entries; idx++) {
txp = &txp_array[idx];
@@ -416,7 +413,7 @@ static void iwl_eeprom_enhanced_txpower(struct device *dev,
}
}
-static void iwl_init_band_reference(const struct iwl_cfg *cfg,
+static void iwl_init_band_reference(const struct iwl_rf_cfg *cfg,
const u8 *eeprom, size_t eeprom_size,
int eeprom_band, int *eeprom_ch_count,
const struct iwl_eeprom_channel **ch_info,
@@ -426,7 +423,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg,
offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
- *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
+ *ch_info = iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
switch (eeprom_band) {
case 1: /* 2.4GHz band */
@@ -510,7 +507,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev,
#define CHECK_AND_PRINT_I(x) \
((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
-static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+static int iwl_init_channel_map(struct device *dev, const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
{
@@ -749,7 +746,7 @@ static int iwl_nvm_is_otp(struct iwl_trans *trans)
u32 otpgp;
/* OTP only valid for CP/PP and after */
- switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ switch (trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) {
case CSR_HW_REV_TYPE_NONE:
IWL_ERR(trans, "Unknown hardware type\n");
return -EIO;
@@ -784,7 +781,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
* CSR auto clock gate disable bit -
* this is only applicable for HW with OTP shadow RAM
*/
- if (trans->trans_cfg->base_params->shadow_ram_support)
+ if (trans->mac_cfg->base->shadow_ram_support)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -905,7 +902,7 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
}
/* more in the link list, continue */
usedblocks++;
- } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items);
+ } while (usedblocks <= trans->mac_cfg->base->max_ll_items);
/* OTP has no valid blocks */
IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
@@ -938,7 +935,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
if (nvm_is_otp < 0)
return nvm_is_otp;
- sz = trans->trans_cfg->base_params->eeprom_size;
+ sz = trans->mac_cfg->base->eeprom_size;
IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
e = kmalloc(sz, GFP_KERNEL);
@@ -973,7 +970,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
- if (!trans->trans_cfg->base_params->shadow_ram_support) {
+ if (!trans->mac_cfg->base->shadow_ram_support) {
ret = iwl_find_otp_image(trans, &validblockaddr);
if (ret)
goto err_unlock;
@@ -1027,7 +1024,7 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
return ret;
}
-static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
{
@@ -1062,7 +1059,7 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
/* EEPROM data functions */
struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const u8 *eeprom, size_t eeprom_size)
{
struct iwl_nvm_data *data;
@@ -1098,14 +1095,14 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
EEPROM_RAW_TEMPERATURE);
if (!tmp)
goto err_free;
- data->raw_temperature = *(__le16 *)tmp;
+ data->raw_temperature = *(const __le16 *)tmp;
tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
EEPROM_KELVIN_TEMPERATURE);
if (!tmp)
goto err_free;
- data->kelvin_temperature = *(__le16 *)tmp;
- data->kelvin_voltage = *((__le16 *)tmp + 1);
+ data->kelvin_temperature = *(const __le16 *)tmp;
+ data->kelvin_voltage = *((const __le16 *)tmp + 1);
radio_cfg =
iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index 5ca85d90a8d6..cec2ebdfd651 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2025 Intel Corporation
*****************************************************************************/
@@ -116,9 +116,9 @@ static int iwl_led_cmd(struct iwl_priv *priv,
}
led_cmd.on = iwl_blink_compensation(priv, on,
- priv->trans->trans_cfg->base_params->led_compensation);
+ priv->trans->mac_cfg->base->led_compensation);
led_cmd.off = iwl_blink_compensation(priv, off,
- priv->trans->trans_cfg->base_params->led_compensation);
+ priv->trans->mac_cfg->base->led_compensation);
ret = iwl_send_led_cmd(priv, &led_cmd);
if (!ret) {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 56d19a034c24..0771a46bd552 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(C) 2018 - 2019, 2022 - 2024 Intel Corporation
+ * Copyright(C) 2018 - 2019, 2022 - 2025 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -96,7 +96,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
- if (priv->trans->max_skb_frags)
+ if (priv->trans->info.max_skb_frags)
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
@@ -188,7 +188,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&priv->nvm_data->bands[NL80211_BAND_5GHZ];
- hw->wiphy->hw_version = priv->trans->hw_id;
+ hw->wiphy->hw_version = priv->trans->info.hw_id;
iwl_leds_init(priv);
@@ -549,7 +549,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
iwlagn_prepare_restart(priv);
- memset((void *)&ctx->active, 0, sizeof(ctx->active));
+ memset((void *)(uintptr_t)&ctx->active, 0, sizeof(ctx->active));
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
@@ -1091,7 +1091,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto done;
}
- scd_queues = BIT(priv->trans->trans_cfg->base_params->num_of_queues) - 1;
+ scd_queues = BIT(priv->trans->mac_cfg->base->num_of_queues) - 1;
scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index a27a72cc017a..1d619384c629 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved.
- * Copyright(c) 2024 Intel Corporation. All rights reserved.
+ * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
@@ -824,11 +824,11 @@ int iwl_alive_start(struct iwl_priv *priv)
iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant);
if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
- struct iwl_rxon_cmd *active_rxon =
- (struct iwl_rxon_cmd *)&ctx->active;
+ struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active;
+
/* apply any changes in staging */
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ active->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
struct iwl_rxon_context *tmp;
/* Initialize our rx_config data */
@@ -1137,9 +1137,10 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
static void iwl_set_hw_params(struct iwl_priv *priv)
{
- if (priv->cfg->ht_params)
+ /* every HT device also supports HT40 on at least one band */
+ if (priv->cfg->ht_params.ht40_bands)
priv->hw_params.use_rts_for_aggregation =
- priv->cfg->ht_params->use_rts_for_aggregation;
+ priv->cfg->ht_params.use_rts_for_aggregation;
/* Device-specific setup */
priv->lib->set_hw_params(priv);
@@ -1173,8 +1174,9 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
struct iwl_nvm_data *data = priv->nvm_data;
+ /* all HT devices also have HT40 on at least one band */
if (data->sku_cap_11n_enable &&
- !priv->cfg->ht_params) {
+ !priv->cfg->ht_params.ht40_bands) {
IWL_ERR(priv, "Invalid 11n configuration\n");
return -EINVAL;
}
@@ -1224,7 +1226,7 @@ static int iwl_nvm_check_version(struct iwl_nvm_data *data,
}
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
- const struct iwl_cfg *cfg,
+ const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw,
struct dentry *dbgfs_dir)
{
@@ -1233,7 +1235,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
struct iwl_op_mode *op_mode;
u16 num_mac;
u32 ucode_flags;
- struct iwl_trans_config trans_cfg = {};
static const u8 no_reclaim_cmds[] = {
REPLY_RX_PHY_CMD,
REPLY_RX_MPDU_CMD,
@@ -1248,7 +1249,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
************************/
hw = iwl_alloc_all();
if (!hw) {
- pr_err("%s: Cannot allocate network device\n", trans->name);
+ pr_err("%s: Cannot allocate network device\n",
+ trans->info.name);
err = -ENOMEM;
goto out;
}
@@ -1261,7 +1263,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
priv->cfg = cfg;
priv->fw = fw;
- switch (priv->trans->trans_cfg->device_family) {
+ switch (priv->trans->mac_cfg->device_family) {
case IWL_DEVICE_FAMILY_1000:
case IWL_DEVICE_FAMILY_100:
priv->lib = &iwl_dvm_1000_cfg;
@@ -1309,52 +1311,50 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* Populate the state variables that the transport layer needs
* to know about.
*/
- trans_cfg.op_mode = op_mode;
- trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
- trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+ BUILD_BUG_ON(sizeof(no_reclaim_cmds) >
+ sizeof(trans->conf.no_reclaim_cmds));
+ memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
+ sizeof(no_reclaim_cmds));
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
case IWL_AMSDU_4K:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ trans->conf.rx_buf_size = IWL_AMSDU_4K;
break;
case IWL_AMSDU_8K:
- trans_cfg.rx_buf_size = IWL_AMSDU_8K;
+ trans->conf.rx_buf_size = IWL_AMSDU_8K;
break;
case IWL_AMSDU_12K:
default:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+ trans->conf.rx_buf_size = IWL_AMSDU_4K;
pr_err("Unsupported amsdu_size: %d\n",
iwlwifi_mod_params.amsdu_size);
}
- trans_cfg.command_groups = iwl_dvm_groups;
- trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
+ trans->conf.command_groups = iwl_dvm_groups;
+ trans->conf.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
- trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
- trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
- driver_data[2]);
+ trans->conf.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
+ trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
- priv->trans->trans_cfg->base_params->num_of_queues);
+ priv->trans->mac_cfg->base->num_of_queues);
ucode_flags = fw->ucode_capa.flags;
if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
- trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+ trans->conf.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
} else {
priv->sta_key_max_num = STA_KEY_MAX_NUM;
- trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+ trans->conf.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
- /* Configure transport layer */
- iwl_trans_configure(priv->trans, &trans_cfg);
+ trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
- trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
- trans->command_groups = trans_cfg.command_groups;
- trans->command_groups_size = trans_cfg.command_groups_size;
+ iwl_trans_op_mode_enter(priv->trans, op_mode);
/* At this point both hw and priv are allocated. */
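
The BUILD_BUG_ON()/memcpy() pair above replaces the old trans_cfg handoff with a direct copy into the transport's own config. A minimal sketch of the idiom, with illustrative names (my_conf, cmds) that are not from the driver:

#include <linux/build_bug.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_conf {
	u8 no_reclaim_cmds[8];
};

static const u8 cmds[] = { 0xc0, 0xc1, 0xc2 };

static void my_conf_init(struct my_conf *conf)
{
	/* refuses to build if the source list outgrows the destination */
	BUILD_BUG_ON(sizeof(cmds) > sizeof(conf->no_reclaim_cmds));
	memcpy(conf->no_reclaim_cmds, cmds, sizeof(cmds));
}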
@@ -1378,18 +1378,18 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* 2. Read REV register
***********************/
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->trans->name, priv->trans->hw_rev);
+ priv->trans->info.name, priv->trans->info.hw_rev);
err = iwl_trans_start_hw(priv->trans);
if (err)
- goto out_free_hw;
+ goto out_leave_trans;
/* Read the EEPROM */
err = iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
&priv->eeprom_blob_size);
if (err) {
IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_free_hw;
+ goto out_leave_trans;
}
/* Reset chip to save power until we load uCode during "up". */
@@ -1437,10 +1437,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* packaging bug or due to the eeprom check above
*/
priv->sta_key_max_num = STA_KEY_MAX_NUM;
- trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-
- /* Configure transport layer again*/
- iwl_trans_configure(priv->trans, &trans_cfg);
+ trans->conf.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
/*******************
@@ -1508,6 +1505,8 @@ out_free_eeprom_blob:
kfree(priv->eeprom_blob);
out_free_eeprom:
kfree(priv->nvm_data);
+out_leave_trans:
+ iwl_trans_op_mode_leave(priv->trans);
out_free_hw:
ieee80211_free_hw(priv->hw);
out:
@@ -1992,7 +1991,7 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
/* SKU Control */
iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH,
- CSR_HW_REV_STEP_DASH(priv->trans->hw_rev));
+ CSR_HW_REV_STEP_DASH(priv->trans->info.hw_rev));
/* write radio config values to register */
if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
index 6d16a7105656..6b42d6e5f30f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019, 2025 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -64,32 +64,32 @@ struct iwl_power_vec_entry {
/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
/* DTIM 0 - 2 */
static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
- {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
- {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
- {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
+ {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF), 0}, 1},
+ {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF), 0}, 2}
};
/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
/* DTIM 3 - 10 */
static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
- {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
- {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
- {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
+ {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4), 0}, 0},
+ {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10), 0}, 1},
+ {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10), 0}, 2}
};
/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
/* DTIM 11 - */
static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
- {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
- {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
- {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
- {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
+ {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF), 0}, 0},
+ {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF), 0}, 0}
};
/* advance power management */
@@ -196,7 +196,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
- if (priv->trans->trans_cfg->base_params->shadow_reg_enable)
+ if (priv->trans->mac_cfg->base->shadow_reg_enable)
cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 7f67e602940c..5f8b60824043 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -3,7 +3,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018, 2020-2021 Intel Corporation
+ * Copyright(c) 2018, 2020-2021, 2025 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -50,7 +50,7 @@ static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
* See iwlagn_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
+ struct iwl_rxon_cmd *rxon = (void *)(uintptr_t)&ctx->active;
if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
return;
@@ -643,8 +643,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
fraglen = len - hdrlen;
if (fraglen) {
- int offset = (void *)hdr + hdrlen -
- rxb_addr(rxb) + rxb_offset(rxb);
+ int offset = (u8 *)hdr + hdrlen -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index f80cce37e2c0..2d3c1627f283 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2025 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*****************************************************************************/
@@ -341,7 +341,7 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret;
- struct iwl_rxon_cmd *active = (void *)&ctx->active;
+ struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active;
if (ctx->ctxid == IWL_RXON_CTX_BSS) {
ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
@@ -441,7 +441,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret;
- struct iwl_rxon_cmd *active = (void *)&ctx->active;
+ struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active;
/* RXON timing must be before associated RXON */
if (ctx->ctxid == IWL_RXON_CTX_BSS) {
@@ -1023,7 +1023,7 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
/* cast away the const for active_rxon in this function */
- struct iwl_rxon_cmd *active = (void *)&ctx->active;
+ struct iwl_rxon_cmd *active = (void *)(uintptr_t)&ctx->active;
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
int ret;
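
The (void *)(uintptr_t) casts introduced throughout rxon.c and mac80211.c all serve one purpose: ctx->active is const-qualified for most readers, and round-tripping the address through uintptr_t strips the qualifier without the cast-qual warning a direct pointer cast raises on stricter builds. A standalone sketch of the idiom (names are illustrative, not driver code):

#include <linux/types.h>

struct state {
	int flags;
};

struct ctx {
	const struct state active;	/* read-only to most of the code */
};

static void reset_active(struct ctx *c)
{
	/* deliberate, localized const violation, as in the patch */
	struct state *active = (void *)(uintptr_t)&c->active;

	active->flags = 0;
}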
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 111ed1873006..24fefa0e8148 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -3,7 +3,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023, 2025 Intel Corporation
*****************************************************************************/
#include <linux/kernel.h>
@@ -463,7 +463,7 @@ static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
int q;
for (q = IWLAGN_FIRST_AMPDU_QUEUE;
- q < priv->trans->trans_cfg->base_params->num_of_queues; q++) {
+ q < priv->trans->mac_cfg->base->num_of_queues; q++) {
if (!test_and_set_bit(q, priv->agg_q_alloc)) {
priv->queue_to_mac80211[q] = mq;
return q;
@@ -1277,7 +1277,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
- if (scd_flow >= priv->trans->trans_cfg->base_params->num_of_queues) {
+ if (scd_flow >= priv->trans->mac_cfg->base->num_of_queues) {
IWL_ERR(priv,
"BUG_ON scd_flow is bigger than number of queues\n");
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index bb13ca5d666c..ac90191a3973 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -3,6 +3,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2025 Intel Corporation
*****************************************************************************/
#include <linux/kernel.h>
@@ -222,7 +223,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
int ret;
int i;
- iwl_trans_fw_alive(priv->trans, 0);
+ iwl_trans_fw_alive(priv->trans);
if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
priv->nvm_data->sku_cap_ipan_enable) {
@@ -293,15 +294,10 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
{
struct iwl_notification_wait alive_wait;
struct iwl_alive_data alive_data;
- const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type;
static const u16 alive_cmd[] = { REPLY_ALIVE };
- fw = iwl_get_ucode_image(priv->fw, ucode_type);
- if (WARN_ON(!fw))
- return -EINVAL;
-
old_type = priv->cur_ucode;
priv->cur_ucode = ucode_type;
priv->ucode_loaded = false;
@@ -310,7 +306,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
alive_cmd, ARRAY_SIZE(alive_cmd),
iwl_alive_fn, &alive_data);
- ret = iwl_trans_start_fw(priv->trans, fw, false);
+ ret = iwl_trans_start_fw(priv->trans, priv->fw, ucode_type, false);
if (ret) {
priv->cur_ucode = old_type;
iwl_remove_notification(&priv->notif_wait, &alive_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index efa7b673ebc7..bee7d92293b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2024 Intel Corporation
+ * Copyright (C) 2019-2025 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -847,12 +847,12 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */
+ /* try to read ppag table rev 1 to 4 (all have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev >= 1 && tbl_rev <= 3) {
+ if (tbl_rev >= 1 && tbl_rev <= 4) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
IWL_DEBUG_RADIO(fwrt,
"Reading PPAG table (tbl_rev=%d)\n",
@@ -882,7 +882,7 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto out_free;
read_table:
- fwrt->ppag_ver = tbl_rev;
+ fwrt->ppag_bios_rev = tbl_rev;
flags = &wifi_pkg->package.elements[1];
if (flags->type != ACPI_TYPE_INTEGER) {
@@ -891,7 +891,7 @@ read_table:
}
fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value,
- fwrt->ppag_ver);
+ fwrt->ppag_bios_rev);
/*
* read, verify gain values and save them into the PPAG table.
@@ -912,6 +912,7 @@ read_table:
}
}
+ fwrt->ppag_bios_source = BIOS_SOURCE_ACPI;
ret = 0;
out_free:
@@ -919,40 +920,39 @@ out_free:
return ret;
}
-void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters)
+int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt)
{
+ struct iwl_phy_specific_cfg *filters = &fwrt->phy_filters;
struct iwl_phy_specific_cfg tmp = {};
- union acpi_object *wifi_pkg, *data;
+ union acpi_object *wifi_pkg, *data __free(kfree);
int tbl_rev, i;
data = iwl_acpi_get_object(fwrt->dev, ACPI_WPFC_METHOD);
if (IS_ERR(data))
- return;
+ return PTR_ERR(data);
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WPFC_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg))
- goto out_free;
+ return PTR_ERR(wifi_pkg);
if (tbl_rev != 0)
- goto out_free;
+ return -EINVAL;
BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
ACPI_WPFC_WIFI_DATA_SIZE - 1);
for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
- goto out_free;
+ return -EINVAL;
tmp.filter_cfg_chains[i] =
cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
}
IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
*filters = tmp;
-out_free:
- kfree(data);
+ return 0;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters);
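
The __free(kfree) annotation above comes from linux/cleanup.h: the pointer is freed automatically when it goes out of scope, which is what lets the patch drop the out_free label and return directly from every error path. A minimal sketch of the pattern, assuming standard cleanup.h semantics (the function and sizes are illustrative):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int example(size_t len)
{
	void *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (len < 16)
		return -EINVAL;	/* buf is freed automatically here */

	return 0;		/* and freed here as well */
}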
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index e50b93472dd2..68d8fb5f6357 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2023, 2025 Intel Corporation
*/
#ifndef __iwl_fw_acpi__
#define __iwl_fw_acpi__
@@ -180,8 +180,7 @@ int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
-void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters);
+int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt);
void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
@@ -244,8 +243,10 @@ static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
return -ENOENT;
}
-/* macro since the second argument doesn't always exist */
-#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0)
+static inline int iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 42360a8f23aa..3ce477c248ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -112,6 +112,16 @@ struct iwl_alive_ntf_v6 {
struct iwl_imr_alive_info imr;
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
+struct iwl_alive_ntf {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+ struct iwl_sku_id sku_id;
+ struct iwl_imr_alive_info imr;
+ __le64 platform_id;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_8 */
+
/**
* enum iwl_extended_cfg_flags - commands driver may send before
* finishing init flow
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index d43adb6f9220..1c86a858aaab 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022, 2024 Intel Corporation
+ * Copyright (C) 2018-2022, 2024-2025 Intel Corporation
*/
#ifndef __iwl_fw_api_commands_h__
#define __iwl_fw_api_commands_h__
@@ -145,8 +145,8 @@ enum iwl_legacy_cmds {
REMOVE_STA = 0x19,
/**
- * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
- * &struct iwl_tx_cmd_gen3,
+ * @TX_CMD: uses &struct iwl_tx_cmd_v6 or &struct iwl_tx_cmd_v9 or
+ * &struct iwl_tx_cmd,
* response in &struct iwl_tx_resp or
* &struct iwl_tx_resp_v3
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index c139b965980d..9c88bb280609 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -98,7 +98,7 @@ enum iwl_data_path_subcmd_ids {
/**
* @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
- * uses &struct iwl_esr_mode_notif
+ * uses &struct iwl_esr_mode_notif or &struct iwl_esr_mode_notif_v1
*/
ESR_MODE_NOTIF = 0xF3,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 4fab6c66994e..3173fa96cb48 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -527,8 +527,8 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
* @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
- * @IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE: perform reset handshake and
- * split dump to before/after with region marking
+ * @IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET: split this dump into regions
+ * before and after the reset handshake
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@@ -537,7 +537,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
- IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE = BIT(17),
+ IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET = BIT(17),
};
/**
@@ -560,7 +560,7 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
* @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump, limit the region dump to 600KB
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump, limit the dump size to 5MB
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET: dump this region before reset
- * handshake (if requested by %IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)
+ * handshake (if requested by %IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET)
*/
enum iwl_fw_ini_dump_policy {
IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index e1952fc6d149..33541f92c7c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -6,6 +6,11 @@
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
+#include <linux/ieee80211.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/bits.h>
+#include "rs.h"
/**
* enum iwl_location_subcmd_ids - location group command IDs
@@ -1609,7 +1614,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
/**
- * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v7 - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
@@ -1645,7 +1650,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
* @tx_pn: the last PN used for this responder Tx in case PMF is configured in
* LE byte order.
*/
-struct iwl_tof_range_rsp_ap_entry_ntfy {
+struct iwl_tof_range_rsp_ap_entry_ntfy_v7 {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
@@ -1672,6 +1677,65 @@ struct iwl_tof_range_rsp_ap_entry_ntfy {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6,
LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_7 */
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @reserved1: reserved, for backwards compatibility
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Calib val that was used in for this AP measurement
+ * @specific_calib: val that was used in for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
+ * @reserved: for alignment
+ * @rx_pn: the last PN used for this responder Rx in case PMF is configured in
+ * LE byte order.
+ * @tx_pn: the last PN used for this responder Tx in case PMF is configured in
+ * LE byte order.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 reserved1[2];
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+ u8 rttConfidence;
+ u8 reserved[3];
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_8 */
/**
* enum iwl_tof_response_status - tof response status
@@ -1739,6 +1803,24 @@ struct iwl_tof_range_rsp_ntfy_v7 {
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
/**
+ * struct iwl_tof_range_rsp_ntfy_v9 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v9 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v7 ap[IWL_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
+ * LOCATION_RANGE_RSP_NTFY_API_S_VER_9
+ */
+
+/**
* struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
@@ -1752,8 +1834,7 @@ struct iwl_tof_range_rsp_ntfy {
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
- LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_10 */
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index b511e3aa6bb2..b9f559dac39f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -311,8 +311,41 @@ enum iwl_mac_config_filter_flags {
}; /* MAC_FILTER_FLAGS_MASK_E_VER_1 */
/**
+ * struct iwl_mac_wifi_gen_support_v2 - parameters of iwl_mac_config_cmd
+ * with support up to eht as in version 2 of the command
+ *
+ * @he_support: does this MAC support HE
+ * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling
+ * @eht_support: does this MAC support EHT. Requires he_support
+ */
+struct iwl_mac_wifi_gen_support_v2 {
+ __le16 he_support;
+ __le16 he_ap_support;
+ __le32 eht_support;
+} __packed;
+
+/**
+ * struct iwl_mac_wifi_gen_support - parameters of iwl_mac_config_cmd
+ * with support up to uhr as in version 3 of the command
+ * ( MAC_CONTEXT_CONFIG_CMD = 0x8 )
+ *
+ * @he_support: does this MAC support HE
+ * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling
+ * @eht_support: does this MAC support EHT. Requires he_support
+ * @uhr_support: does this MAC support UHR. Requires eht_support
+ * @reserved: reserved for alignment and to match version 2's size
+ */
+struct iwl_mac_wifi_gen_support {
+ u8 he_support;
+ u8 he_ap_support;
+ u8 eht_support;
+ u8 uhr_support;
+ __le32 reserved;
+} __packed;
+
+/**
* struct iwl_mac_config_cmd - command structure to configure MAC contexts in
- * MLD API
+ * MLD API for versions 2 and 3
* ( MAC_CONTEXT_CONFIG_CMD = 0x8 )
*
* @id_and_color: ID and color of the MAC
@@ -321,9 +354,8 @@ enum iwl_mac_config_filter_flags {
* @local_mld_addr: mld address
* @reserved_for_local_mld_addr: reserved
* @filter_flags: combination of &enum iwl_mac_config_filter_flags
- * @he_support: does this MAC support HE
- * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling
- * @eht_support: does this MAC support EHT. Requires he_support
+ * @wifi_gen_v2: he/eht parameters as in cmd version 2
+ * @wifi_gen: he/eht/uhr parameters as in cmd version 3
* @nic_not_ack_enabled: mark that the NIC doesn't support receiving
* ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG).
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
@@ -332,7 +364,6 @@ enum iwl_mac_config_filter_flags {
* @p2p_dev: mac data for p2p device
*/
struct iwl_mac_config_cmd {
- /* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
/* MAC_CONTEXT_TYPE_API_E */
@@ -340,16 +371,17 @@ struct iwl_mac_config_cmd {
u8 local_mld_addr[6];
__le16 reserved_for_local_mld_addr;
__le32 filter_flags;
- __le16 he_support;
- __le16 he_ap_support;
- __le32 eht_support;
+ union {
+ struct iwl_mac_wifi_gen_support_v2 wifi_gen_v2;
+ struct iwl_mac_wifi_gen_support wifi_gen;
+ };
__le32 nic_not_ack_enabled;
/* MAC_CONTEXT_CONFIG_SPECIFIC_DATA_API_U_VER_2 */
union {
struct iwl_mac_client_data client;
struct iwl_mac_p2p_dev_data p2p_dev;
};
-} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2 */
+} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2_VER_3 */
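
Since the union overlays the version 2 and version 3 layouts, a caller is expected to fill exactly one member, chosen by the command version it negotiated. A hypothetical sketch (cmd_ver and the capability values are illustrative):

static void fill_wifi_gen(struct iwl_mac_config_cmd *cmd, int cmd_ver)
{
	if (cmd_ver >= 3) {
		/* _VER_3: plain u8 fields, no endian conversion */
		cmd->wifi_gen.he_support = 1;
		cmd->wifi_gen.eht_support = 1;
	} else {
		/* _VER_2: little-endian fields of the old layout */
		cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
		cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
	}
}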
/**
* enum iwl_link_ctx_modify_flags - indicate to the fw what fields are being
@@ -457,6 +489,24 @@ enum iwl_link_modify_bandwidth {
};
/**
+ * struct iwl_npca_params - NPCA parameters (non-primary channel access)
+ *
+ * @switch_delay: after switch, delay TX according to destination AP
+ * @switch_back_delay: switch back to control channel before OBSS frame end
+ * @min_dur_threshold: minimum PPDU time to switch to the non-primary
+ * NPCA channel
+ * @flags: NPCA flags - bit 0: puncturing allowed, bit 1: new TX allowed
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_npca_params {
+ u8 switch_delay;
+ u8 switch_back_delay;
+ __le16 min_dur_threshold;
+ __le16 flags;
+ __le16 reserved;
+} __packed; /* NPCA_PARAM_API_S_VER_1 */
+
+/**
* struct iwl_link_config_cmd - command structure to configure the LINK context
* in MLD API
* ( LINK_CONFIG_CMD =0x9 )
@@ -513,6 +563,8 @@ enum iwl_link_modify_bandwidth {
* IEEE802.11REVme-D5.0
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
+ * @npca_params: NPCA parameters
+ * @prio_edca_params: priority EDCA parameters for enhanced QoS
* @reserved3: reserved for future use
*/
struct iwl_link_config_cmd {
@@ -560,8 +612,10 @@ struct iwl_link_config_cmd {
u8 ul_mu_data_disable;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
- __le32 reserved3[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5, _VER_6 */
+ struct iwl_npca_params npca_params; /* since _VER_7 */
+ struct iwl_ac_qos prio_edca_params; /* since _VER_7 */
+ __le32 reserved3[4];
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3, _VER_4, _VER_5, _VER_6, _VER_7 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
@@ -589,6 +643,62 @@ enum iwl_fw_sta_type {
}; /* STATION_TYPE_E_VER_1 */
/**
+ * struct iwl_sta_cfg_cmd_v1 - cmd structure to add a peer sta to the uCode's
+ * station table
+ * ( STA_CONFIG_CMD = 0xA )
+ *
+ * @sta_id: index of station in uCode's station table
+ * @link_id: the id of the link that is used to communicate with this sta
+ * @peer_mld_address: the peers mld address
+ * @reserved_for_peer_mld_address: reserved
+ * @peer_link_address: the address of the link that is used to communicate
+ * with this sta
+ * @reserved_for_peer_link_address: reserved
+ * @station_type: type of this station. See &enum iwl_fw_sta_type
+ * @assoc_id: for GO only
+ * @beamform_flags: beam forming controls
+ * @mfp: indicates whether the STA uses management frame protection or not.
+ * @mimo: indicates whether the sta uses mimo or not
+ * @mimo_protection: indicates whether the sta uses mimo protection or not
+ * @ack_enabled: indicates that the AP supports receiving ACK-
+ * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @trig_rnd_alloc: indicates that trigger based random allocation
+ * is enabled according to UORA element existence
+ * @tx_ampdu_spacing: minimum A-MPDU spacing:
+ * 4 - 2us density, 5 - 4us density, 6 - 8us density, 7 - 16us density
+ * @tx_ampdu_max_size: maximum A-MPDU length: 0 - 8K, 1 - 16K, 2 - 32K,
+ * 3 - 64K, 4 - 128K, 5 - 256K, 6 - 512K, 7 - 1024K.
+ * @sp_length: the size of the SP in actual number of frames
+ * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ * enabled ACs.
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY
+ * capa
+ * @htc_flags: which features are supported in HTC
+ */
+struct iwl_sta_cfg_cmd_v1 {
+ __le32 sta_id;
+ __le32 link_id;
+ u8 peer_mld_address[ETH_ALEN];
+ __le16 reserved_for_peer_mld_address;
+ u8 peer_link_address[ETH_ALEN];
+ __le16 reserved_for_peer_link_address;
+ __le32 station_type;
+ __le32 assoc_id;
+ __le32 beamform_flags;
+ __le32 mfp;
+ __le32 mimo;
+ __le32 mimo_protection;
+ __le32 ack_enabled;
+ __le32 trig_rnd_alloc;
+ __le32 tx_ampdu_spacing;
+ __le32 tx_ampdu_max_size;
+ __le32 sp_length;
+ __le32 uapsd_acs;
+ struct iwl_he_pkt_ext_v2 pkt_ext;
+ __le32 htc_flags;
+} __packed; /* STA_CMD_API_S_VER_1 */
+
+/**
* struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's
* station table
* ( STA_CONFIG_CMD = 0xA )
@@ -620,6 +730,14 @@ enum iwl_fw_sta_type {
* @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY
* capa
* @htc_flags: which features are supported in HTC
+ * @use_ldpc_x2_cw: Indicates whether to use LDPC with double CW
+ * @use_icf: Indicates whether to use ICF instead of RTS
+ * @dps_pad_time: DPS (Dynamic Power Save) padding delay resolution to ensure
+ * proper timing alignment
+ * @dps_trans_delay: DPS minimal time that takes the peer to return to low power
+ * @mic_prep_pad_delay: MIC prep time padding
+ * @mic_compute_pad_delay: MIC compute time padding
+ * @reserved: Reserved for alignment
*/
struct iwl_sta_cfg_cmd {
__le32 sta_id;
@@ -642,7 +760,14 @@ struct iwl_sta_cfg_cmd {
__le32 uapsd_acs;
struct iwl_he_pkt_ext_v2 pkt_ext;
__le32 htc_flags;
-} __packed; /* STA_CMD_API_S_VER_1 */
+ u8 use_ldpc_x2_cw;
+ u8 use_icf;
+ u8 dps_pad_time;
+ u8 dps_trans_delay;
+ u8 mic_prep_pad_delay;
+ u8 mic_compute_pad_delay;
+ u8 reserved[2];
+} __packed; /* STA_CMD_API_S_VER_2 */
/**
* struct iwl_aux_sta_cmd - command for AUX STA configuration
@@ -686,9 +811,9 @@ struct iwl_mvm_sta_disable_tx_cmd {
/**
* enum iwl_mvm_fw_esr_recommendation - FW recommendation code
- * @ESR_RECOMMEND_LEAVE: recommendation to leave esr
- * @ESR_FORCE_LEAVE: force exiting esr
- * @ESR_RECOMMEND_ENTER: recommendation to enter esr
+ * @ESR_RECOMMEND_LEAVE: recommendation to leave EMLSR
+ * @ESR_FORCE_LEAVE: force exiting EMLSR
+ * @ESR_RECOMMEND_ENTER: recommendation to enter EMLSR
*/
enum iwl_mvm_fw_esr_recommendation {
ESR_RECOMMEND_LEAVE,
@@ -697,15 +822,46 @@ enum iwl_mvm_fw_esr_recommendation {
}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */
/**
- * struct iwl_esr_mode_notif - FWs recommendation/force for esr mode
+ * struct iwl_esr_mode_notif_v1 - FW recommendation/force for EMLSR mode
*
- * @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation
+ * @action: the action to apply on EMLSR state.
+ * See &iwl_mvm_fw_esr_recommendation
*/
-struct iwl_esr_mode_notif {
+struct iwl_esr_mode_notif_v1 {
__le32 action;
} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
/**
+ * enum iwl_esr_leave_reason - reasons for leaving EMLSR mode
+ *
+ * @ESR_LEAVE_REASON_OMI_MU_UL_DISALLOWED: OMI MU UL disallowed
+ * @ESR_LEAVE_REASON_NO_TRIG_FOR_ESR_STA: No trigger for EMLSR station
+ * @ESR_LEAVE_REASON_NO_ESR_STA_IN_MU_DL: No EMLSR station in MU DL
+ * @ESR_LEAVE_REASON_BAD_ACTIV_FRAME_TH: Bad activation frame threshold
+ * @ESR_LEAVE_REASON_RTS_IN_DUAL_LISTEN: RTS in dual listen
+ */
+enum iwl_esr_leave_reason {
+ ESR_LEAVE_REASON_OMI_MU_UL_DISALLOWED = BIT(0),
+ ESR_LEAVE_REASON_NO_TRIG_FOR_ESR_STA = BIT(1),
+ ESR_LEAVE_REASON_NO_ESR_STA_IN_MU_DL = BIT(2),
+ ESR_LEAVE_REASON_BAD_ACTIV_FRAME_TH = BIT(3),
+ ESR_LEAVE_REASON_RTS_IN_DUAL_LISTEN = BIT(4),
+};
+
+/**
+ * struct iwl_esr_mode_notif - FW recommendation/force for EMLSR mode
+ *
+ * @action: the action to apply on EMLSR state.
+ * See &iwl_mvm_fw_esr_recommendation
+ * @leave_reason_mask: mask for various reasons to leave EMLSR mode.
+ * See &iwl_esr_leave_reason
+ */
+struct iwl_esr_mode_notif {
+ __le32 action;
+ __le32 leave_reason_mask;
+} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_2 */
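
A receiver can tell the two notification versions apart by payload length, since _VER_2 only appends a field after @action. A hypothetical sketch assuming the usual iwl_rx_packet helpers (the function name is illustrative):

static u32 esr_leave_reasons(struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_mode_notif *notif = (const void *)pkt->data;

	/* only the _VER_2 layout carries the leave-reason bitmap */
	if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
		return 0;

	return le32_to_cpu(notif->leave_reason_mask);
}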
+
+/**
* struct iwl_missed_beacons_notif - sent when by the firmware upon beacon loss
* ( MISSED_BEACONS_NOTIF = 0xF6 )
* @link_id: fw link ID
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 26301c0b06a1..2a174c00b712 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@@ -287,9 +287,9 @@ struct iwl_ac_qos {
__le16 cw_min;
__le16 cw_max;
u8 aifsn;
- u8 fifos_mask;
+ u8 fifos_mask; /* not in use since _VER_3 */
__le16 edca_txop;
-} __packed; /* AC_QOS_API_S_VER_2 */
+} __packed; /* AC_QOS_API_S_VER_2, _VER_3 */
/**
* struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 4d8a12799c4d..4594a7c94bd6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -146,6 +146,7 @@ struct iwl_phy_context_cmd_v1 {
* @sbb_ctrl_channel_loc: location of the control channel
* @puncture_mask: bitmap of punctured subchannels
* @dsp_cfg_flags: set to 0
+ * @secondary_ctrl_chnl_loc: location of secondary control channel
* @reserved: reserved to align to 64 bit
*/
struct iwl_phy_context_cmd {
@@ -164,11 +165,13 @@ struct iwl_phy_context_cmd {
};
};
__le32 dsp_cfg_flags;
- __le32 reserved;
+ u8 secondary_ctrl_chnl_loc;
+ u8 reserved[3];
} __packed; /* PHY_CONTEXT_CMD_API_VER_3,
* PHY_CONTEXT_CMD_API_VER_4,
* PHY_CONTEXT_CMD_API_VER_5,
- * PHY_CONTEXT_CMD_API_VER_6
+ * PHY_CONTEXT_CMD_API_VER_6,
+ * PHY_CONTEXT_CMD_API_S_VER_7
*/
#endif /* __iwl_fw_api_phy_ctxt_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 37ec26596ee7..23140205ccb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_power_h__
#define __iwl_fw_api_power_h__
+#include "nvm-reg.h"
+
/* Power Management Commands, Responses, Notifications */
/**
@@ -54,7 +56,7 @@ struct iwl_ltr_config_cmd_v1 {
* @flags: See &enum iwl_ltr_config_flags
* @static_long: static LTR Long register value.
* @static_short: static LTR Short register value.
- * @ltr_cfg_values: LTR parameters table values (in usec) in folowing order:
+ * @ltr_cfg_values: LTR parameters table values (in usec) in following order:
* TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES
* is set.
* @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if
@@ -89,6 +91,7 @@ struct iwl_ltr_config_cmd {
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
* @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
* detection enablement
+ * @POWER_FLAGS_ENABLE_SMPS_MSK: SMPS is allowed for this vif
*/
enum iwl_power_flags {
POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
@@ -99,6 +102,7 @@ enum iwl_power_flags {
POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12),
+ POWER_FLAGS_ENABLE_SMPS_MSK = BIT(14),
};
#define IWL_POWER_VEC_SIZE 5
@@ -216,7 +220,6 @@ struct iwl_mac_power_cmd {
/* CONTEXT_DESC_API_T_VER_1 */
__le32 id_and_color;
- /* CLIENT_PM_POWER_TABLE_S_VER_1 */
__le16 flags;
__le16 keep_alive_seconds;
__le32 rx_data_timeout;
@@ -237,7 +240,7 @@ struct iwl_mac_power_cmd {
u8 heavy_rx_thld_percentage;
u8 limited_ps_threshold;
u8 reserved;
-} __packed;
+} __packed; /* CLIENT_PM_POWER_TABLE_S_VER_1, VER_2 */
/*
* struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when
@@ -628,28 +631,37 @@ enum iwl_ppag_flags {
/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
- * @v1: version 1
- * @v2: version 2
- * version 3, 4, 5 and 6 are the same structure as v2,
+ * @v1: command version 1 structure.
+ * @v2: command versions 2 through 6 all share the same structure,
 * but differ in the format of the flags bitmap
+ * @v3: command version 7 structure.
* @v1.flags: values from &enum iwl_ppag_flags
* @v1.gain: table of antenna gain values per chain and sub-band
* @v1.reserved: reserved
* @v2.flags: values from &enum iwl_ppag_flags
* @v2.gain: table of antenna gain values per chain and sub-band
- * @v2.reserved: reserved
+ * @v3.ppag_config_info: see &struct bios_value_u32
+ * @v3.gain: table of antenna gain values per chain and sub-band
+ * @v3.reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
__le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1];
s8 reserved[2];
- } v1;
+ } __packed v1; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_1 */
struct {
__le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } v2;
+ } __packed v2; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_2, VER3, VER4,
+ * VER5, VER6
+ */
+ struct {
+ struct bios_value_u32 ppag_config_info;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ s8 reserved[2];
+ } __packed v3; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
} __packed;
#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
@@ -657,6 +669,15 @@ union iwl_ppag_table_cmd {
IWL_PPAG_ETSI_LPI_UHB_MASK | \
IWL_PPAG_USA_LPI_UHB_MASK)
+#define IWL_PPAG_CMD_V6_MASK (IWL_PPAG_CMD_V5_MASK | \
+ IWL_PPAG_ETSI_VLP_UHB_MASK | \
+ IWL_PPAG_ETSI_SP_UHB_MASK | \
+ IWL_PPAG_USA_VLP_UHB_MASK | \
+ IWL_PPAG_USA_SP_UHB_MASK | \
+ IWL_PPAG_CANADA_LPI_UHB_MASK | \
+ IWL_PPAG_CANADA_VLP_UHB_MASK | \
+ IWL_PPAG_CANADA_SP_UHB_MASK)
+
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
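
Because all three layouts live in one union, a sender picks the member, and therefore the command size, from the version the firmware advertises. A hypothetical sizing helper (cmd_ver is illustrative):

static size_t ppag_cmd_size(int cmd_ver, const union iwl_ppag_table_cmd *cmd)
{
	if (cmd_ver == 1)
		return sizeof(cmd->v1);
	if (cmd_ver <= 6)
		return sizeof(cmd->v2);
	return sizeof(cmd->v3);	/* _VER_7 */
}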
@@ -690,7 +711,7 @@ struct iwl_sar_offset_mapping_cmd {
* Roaming Energy Delta Threshold, otherwise use normal Energy Delta
* Threshold. Typical energy threshold is -72dBm.
* @bf_temp_threshold: This threshold determines the type of temperature
- * filtering (Slow or Fast) that is selected (Units are in Celsuis):
+ * filtering (Slow or Fast) that is selected (Units are in Celsius):
* If the current temperature is above this threshold - Fast filter
* will be used, If the current temperature is below this threshold -
* Slow filter will be used.
@@ -698,12 +719,12 @@ struct iwl_sar_offset_mapping_cmd {
* calculated for this and the last passed beacon is greater than this
* threshold. Zero value means that the temperature change is ignored for
* beacon filtering; beacons will not be forced to be sent to driver
- * regardless of whether its temerature has been changed.
+ * regardless of whether its temperature has been changed.
* @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
* calculated for this and the last passed beacon is greater than this
* threshold. Zero value means that the temperature change is ignored for
* beacon filtering; beacons will not be forced to be sent to driver
- * regardless of whether its temerature has been changed.
+ * regardless of whether its temperature has been changed.
* @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
* @bf_debug_flag: beacon filtering debug configuration
* @bf_escape_timer: Send beacons to driver if no beacons were passed
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index c2f806cbab59..3222cbcbe1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
#define __iwl_fw_api_rs_h__
-
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bits.h>
#include "mac.h"
/**
@@ -213,7 +215,8 @@ enum iwl_tlc_update_flags {
* @sta_id: station id
* @reserved: reserved
* @flags: bitmap of notifications reported
- * @rate: current initial rate
+ * @rate: current initial rate, format depends on the notification
+ * version
* @amsdu_size: Max AMSDU size, in bytes
* @amsdu_enabled: bitmap for per-TID AMSDU enablement
*/
@@ -224,7 +227,7 @@ struct iwl_tlc_update_notif {
__le32 rate;
__le32 amsdu_size;
__le32 amsdu_enabled;
-} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2, _VER_3, _VER_4 */
/**
* enum iwl_tlc_debug_types - debug options
@@ -427,6 +430,7 @@ enum {
/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
#define RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define RATE_VHT_MCS_NSS_MSK 0x30
/*
* Legacy OFDM rate format for bits 7:0
@@ -541,7 +545,7 @@ enum {
#define RATE_MCS_CTS_REQUIRED_POS (31)
#define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS)
-/* rate_n_flags bit field version 2
+/* rate_n_flags bit field version 2 and 3
*
* The 32-bit value has different layouts in the low 8 bits depending on the
* format. There are three formats, HT, VHT and legacy (11abg, with subformats
@@ -553,23 +557,25 @@ enum {
* (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT)
* (3) Very High-throughput (VHT) (4) High-efficiency (HE)
* (5) Extremely High-throughput (EHT)
+ * (6) Ultra High Reliability (UHR) (v3 rate format only)
*/
#define RATE_MCS_MOD_TYPE_POS 8
#define RATE_MCS_MOD_TYPE_MSK (0x7 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_CCK_MSK (0 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_LEGACY_OFDM_MSK (1 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_HT_MSK (2 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_VHT_MSK (3 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_HE_MSK (4 << RATE_MCS_MOD_TYPE_POS)
-#define RATE_MCS_EHT_MSK (5 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_CCK (0 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_LEGACY_OFDM (1 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_HT (2 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_VHT (3 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_HE (4 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_EHT (5 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_MOD_TYPE_UHR (6 << RATE_MCS_MOD_TYPE_POS)
/*
* Legacy CCK rate format for bits 0:3:
*
- * (0) 0xa - 1 Mbps
- * (1) 0x14 - 2 Mbps
- * (2) 0x37 - 5.5 Mbps
- * (3) 0x6e - 11 nbps
+ * (0) 1 Mbps
+ * (1) 2 Mbps
+ * (2) 5.5 Mbps
+ * (3) 11 Mbps
*
* Legacy OFDM rate format for bits 3:0:
*
@@ -586,15 +592,19 @@ enum {
#define RATE_LEGACY_RATE_MSK 0x7
/*
- * HT, VHT, HE, EHT rate format for bits 3:0
- * 3-0: MCS
- *
+ * HT, VHT, HE, EHT, UHR rate format
+ * Version 2: (not applicable for UHR)
+ * 3-0: MCS
+ * 4: NSS==2 indicator
+ * Version 3:
+ * 4-0: MCS
+ * 5: NSS==2 indicator
*/
#define RATE_HT_MCS_CODE_MSK 0x7
-#define RATE_MCS_NSS_POS 4
-#define RATE_MCS_NSS_MSK (1 << RATE_MCS_NSS_POS)
-#define RATE_MCS_CODE_MSK 0xf
-#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 1) | \
+#define RATE_MCS_NSS_MSK_V2 0x10
+#define RATE_MCS_NSS_MSK 0x20
+#define RATE_MCS_CODE_MSK 0x1f
+#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 2) | \
((r) & RATE_HT_MCS_CODE_MSK))
/* Bits 7-5: reserved */
@@ -810,11 +820,38 @@ struct iwl_lq_cmd {
}; /* LINK_QUALITY_CMD_API_S_VER_1 */
u8 iwl_fw_rate_idx_to_plcp(int idx);
-u32 iwl_new_rate_from_v1(u32 rate_v1);
const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
const char *iwl_rs_pretty_ant(u8 ant);
const char *iwl_rs_pretty_bw(int bw);
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
bool iwl_he_is_sgi(u32 rate_n_flags);
+static inline u32 iwl_v3_rate_from_v2_v3(__le32 rate, bool fw_v3)
+{
+ u32 val;
+
+ if (fw_v3)
+ return le32_to_cpu(rate);
+
+ val = le32_to_cpu(rate) & ~RATE_MCS_NSS_MSK_V2;
+ val |= u32_encode_bits(le32_get_bits(rate, RATE_MCS_NSS_MSK_V2),
+ RATE_MCS_NSS_MSK);
+
+ return val;
+}
+
+static inline __le32 iwl_v3_rate_to_v2_v3(u32 rate, bool fw_v3)
+{
+ __le32 val;
+
+ if (fw_v3)
+ return cpu_to_le32(rate);
+
+ val = cpu_to_le32(rate & ~RATE_MCS_NSS_MSK);
+ val |= le32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK),
+ RATE_MCS_NSS_MSK_V2);
+
+ return val;
+}
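/*
 * Usage sketch for the two helpers above (illustrative values, not
 * taken from any firmware capture):
 *
 *	__le32 v2 = cpu_to_le32(RATE_MCS_MOD_TYPE_VHT |
 *				RATE_MCS_NSS_MSK_V2 | 5);
 *	u32 v3 = iwl_v3_rate_from_v2_v3(v2, false);
 *
 * yields v3 == RATE_MCS_MOD_TYPE_VHT | RATE_MCS_NSS_MSK | 5: the NSS
 * bit moves from bit 4 to bit 5 while all other bits are preserved.
 * With fw_v3 == true both helpers reduce to byte-order conversions.
 */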
+
#endif /* __iwl_fw_api_rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 691c879cb90d..7cf6d6ac7430 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -193,10 +193,11 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
-#define RX_MPDU_BAND_POS 6
-#define RX_MPDU_BAND_MASK 0xC0
-#define BAND_IN_RX_STATUS(_val) \
- (((_val) & RX_MPDU_BAND_MASK) >> RX_MPDU_BAND_POS)
+enum iwl_rx_mpdu_mac_phy_band {
+ IWL_RX_MPDU_MAC_PHY_BAND_MAC_MASK = 0x0f,
+ IWL_RX_MPDU_MAC_PHY_BAND_PHY_MASK = 0x30,
+ IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK = 0xc0,
+};
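/*
 * Decode sketch (variable names are illustrative; "desc" is assumed
 * to point at a struct iwl_rx_mpdu_desc):
 *
 *	u8 mac_id = u8_get_bits(desc->mac_phy_band,
 *				IWL_RX_MPDU_MAC_PHY_BAND_MAC_MASK);
 *	u8 phy_id = u8_get_bits(desc->mac_phy_band,
 *				IWL_RX_MPDU_MAC_PHY_BAND_PHY_MASK);
 *	u8 band = u8_get_bits(desc->mac_phy_band,
 *			      IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK);
 */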
enum iwl_rx_l3_proto_values {
IWL_RX_L3_TYPE_NONE,
@@ -639,7 +640,9 @@ struct iwl_rx_mpdu_desc_v3 {
*/
__le32 reserved[1];
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
- RX_MPDU_RES_START_API_S_VER_5 */
+ * RX_MPDU_RES_START_API_S_VER_5,
+ * RX_MPDU_RES_START_API_S_VER_6
+ */
/**
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
@@ -668,9 +671,10 @@ struct iwl_rx_mpdu_desc {
*/
__le16 phy_info;
/**
- * @mac_phy_idx: MAC/PHY index
+ * @mac_phy_band: MAC ID, PHY ID, band;
+ * see &enum iwl_rx_mpdu_mac_phy_band
*/
- u8 mac_phy_idx;
+ u8 mac_phy_band;
/* DW4 */
union {
struct {
@@ -722,8 +726,10 @@ struct iwl_rx_mpdu_desc {
struct iwl_rx_mpdu_desc_v3 v3;
};
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
- RX_MPDU_RES_START_API_S_VER_4,
- RX_MPDU_RES_START_API_S_VER_5 */
+ * RX_MPDU_RES_START_API_S_VER_4,
+ * RX_MPDU_RES_START_API_S_VER_5,
+ * RX_MPDU_RES_START_API_S_VER_6
+ */
#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
@@ -819,7 +825,7 @@ struct iwl_rx_no_data {
* 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel
* @on_air_rise_time: GP2 during on air rise
* @fr_time: frame time
- * @rate: rate/mcs of frame
+ * @rate: rate/mcs of frame, format depends on the notification version
* @phy_info: &enum iwl_rx_phy_eht_data0 and &enum iwl_rx_phy_info_type
* @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type.
* for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT
@@ -835,9 +841,7 @@ struct iwl_rx_no_data_ver_3 {
__le32 rate;
__le32 phy_info[2];
__le32 rx_vec[4];
-} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
- RX_NO_DATA_NTFY_API_S_VER_2
- RX_NO_DATA_NTFY_API_S_VER_3 */
+} __packed; /* RX_NO_DATA_NTFY_API_S_VER_3, _VER_4 */
struct iwl_frame_release {
u8 baid;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 0a9f14fb04be..00713a991879 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 - 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021, 2023-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -584,6 +584,9 @@ struct iwl_stats_ntfy_per_phy {
__le32 last_tx_ch_width_indx;
} __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */
+/* unknown channel load (due to not being active on channel) */
+#define IWL_STATS_UNKNOWN_CHANNEL_LOAD 0xffffffff
+
/**
* struct iwl_stats_ntfy_per_sta
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index cfa6532a3cdd..58d5a6ef633e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -50,7 +50,7 @@ struct iwl_tdls_channel_switch_timing {
*/
struct iwl_tdls_channel_switch_frame {
__le32 switch_time_offset;
- struct iwl_tx_cmd tx_cmd;
+ struct iwl_tx_cmd_v6 tx_cmd;
u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
@@ -131,7 +131,7 @@ struct iwl_tdls_config_cmd {
struct iwl_tdls_sta_info sta_info[IWL_TDLS_STA_COUNT];
__le32 pti_req_data_offset;
- struct iwl_tx_cmd pti_req_tx_cmd;
+ struct iwl_tx_cmd_v6 pti_req_tx_cmd;
u8 pti_req_template[];
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 0a39e4b6eb62..557832563f89 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -151,7 +151,7 @@ enum iwl_tx_cmd_sec_ctrl {
#define IWL_LOW_RETRY_LIMIT 7
/**
- * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values
+ * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd_v6 offload_assist values
* @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
* from mac header end. For normal case it is 4 words for SNAP.
* note: tx_cmd, mac header and pad are not counted in the offset.
@@ -181,7 +181,7 @@ enum iwl_tx_offload_assist_flags_pos {
/* TODO: complete documentation for try_cnt and btkill_cnt */
/**
- * struct iwl_tx_cmd - TX command struct to FW
+ * struct iwl_tx_cmd_v6 - TX command struct to FW
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
@@ -221,7 +221,7 @@ enum iwl_tx_offload_assist_flags_pos {
* After the struct fields the MAC header is placed, plus any padding,
 * and then the actual payload.
*/
-struct iwl_tx_cmd {
+struct iwl_tx_cmd_v6 {
__le16 len;
__le16 offload_assist;
__le32 tx_flags;
@@ -258,7 +258,7 @@ struct iwl_dram_sec_info {
} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
/**
- * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
+ * struct iwl_tx_cmd_v9 - TX command struct to FW for 22000 devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
@@ -268,7 +268,7 @@ struct iwl_dram_sec_info {
* cleared. Combination of RATE_MCS_*
* @hdr: 802.11 header
*/
-struct iwl_tx_cmd_gen2 {
+struct iwl_tx_cmd_v9 {
__le16 len;
__le16 offload_assist;
__le32 flags;
@@ -279,18 +279,18 @@ struct iwl_tx_cmd_gen2 {
TX_CMD_API_S_VER_9 */
/**
- * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
+ * struct iwl_tx_cmd - TX command struct to FW for AX210+ devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @flags: combination of &enum iwl_tx_cmd_flags
* @offload_assist: TX offload configuration
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
- * cleared. Combination of RATE_MCS_*
+ * cleared. Combination of RATE_MCS_*; format depends on version
* @reserved: reserved
* @hdr: 802.11 header
*/
-struct iwl_tx_cmd_gen3 {
+struct iwl_tx_cmd {
__le16 len;
__le16 flags;
__le32 offload_assist;
@@ -298,7 +298,9 @@ struct iwl_tx_cmd_gen3 {
__le32 rate_n_flags;
u8 reserved[8];
struct ieee80211_hdr hdr[];
-} __packed; /* TX_CMD_API_S_VER_8, TX_CMD_API_S_VER_10 */
+} __packed; /* TX_CMD_API_S_VER_10,
+ * TX_CMD_API_S_VER_11
+ */
/*
* TX response related data
@@ -549,7 +551,7 @@ struct iwl_tx_resp_v3 {
* @failure_rts: num of failures due to unsuccessful RTS
* @failure_frame: num failures due to no ACK (unused for agg)
* @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
- * Tx of all the batch. RATE_MCS_*
+ * Tx of all the batch. RATE_MCS_*; format depends on command version
* @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
* for agg: RTS + CTS + aggregation tx time + block-ack time.
* in usec.
@@ -600,8 +602,10 @@ struct iwl_tx_resp {
__le16 reserved2;
struct agg_tx_status status;
} __packed; /* TX_RSP_API_S_VER_6,
- TX_RSP_API_S_VER_7,
- TX_RSP_API_S_VER_8 */
+ * TX_RSP_API_S_VER_7,
+ * TX_RSP_API_S_VER_8,
+ * TX_RSP_API_S_VER_9
+ */
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -701,7 +705,8 @@ enum iwl_mvm_ba_resp_flags {
* @rts_retry_cnt: RTS retry count
* @reserved: reserved (for alignment)
* @wireless_time: Wireless-media time
- * @tx_rate: the rate the aggregation was sent at
+ * @tx_rate: the rate the aggregation was sent at. Format depends on command
+ * version.
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
* @tfd: array of TFD queue status updates. See &iwl_compressed_ba_tfd
@@ -730,7 +735,8 @@ struct iwl_compressed_ba_notif {
DECLARE_FLEX_ARRAY(struct iwl_compressed_ba_tfd, tfd);
};
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4,
- COMPRESSED_BA_RES_API_S_VER_5 */
+ COMPRESSED_BA_RES_API_S_VER_6,
+ COMPRESSED_BA_RES_API_S_VER_7 */
/**
* struct iwl_mac_beacon_cmd_v6 - beacon template command
@@ -742,7 +748,7 @@ struct iwl_compressed_ba_notif {
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd_v6 {
- struct iwl_tx_cmd tx;
+ struct iwl_tx_cmd_v6 tx;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
@@ -761,7 +767,7 @@ struct iwl_mac_beacon_cmd_v6 {
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd_v7 {
- struct iwl_tx_cmd tx;
+ struct iwl_tx_cmd_v6 tx;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 03f639fbf9b6..ea739ebe7cb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -187,7 +187,7 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
/* Pull RXF2 */
iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
RXF_DIFF_FROM_PREV +
- fwrt->trans->trans_cfg->umac_prph_offset, 1);
+ fwrt->trans->mac_cfg->umac_prph_offset, 1);
/* Pull LMAC2 RXF1 */
if (fwrt->smem_cfg.num_lmacs > 1)
iwl_fwrt_dump_rxf(fwrt, dump_data,
@@ -654,10 +654,10 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
{
u32 range_len;
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210);
handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr);
- } else if (fwrt->trans->trans_cfg->device_family >=
+ } else if (fwrt->trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_22000) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
@@ -665,7 +665,7 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);
- if (fwrt->trans->trans_cfg->mq_rx_supported) {
+ if (fwrt->trans->mac_cfg->mq_rx_supported) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
}
@@ -809,13 +809,14 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
- u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
+ u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->mac_cfg->base->smem_len;
u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
0 : fwrt->trans->cfg->dccm2_len;
int i;
/* SRAM - include stack CCM if driver knows the values for it */
- if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
+ if (!fwrt->trans->cfg->dccm_offset ||
+ !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
@@ -838,7 +839,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
iwl_fw_prph_handler(fwrt, &prph_len,
iwl_fw_get_prph_len);
- if (fwrt->trans->trans_cfg->device_family ==
+ if (fwrt->trans->mac_cfg->device_family ==
IWL_DEVICE_FAMILY_7000 &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
@@ -876,7 +877,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
file_len += sizeof(*dump_data) +
- fwrt->trans->cfg->d3_debug_data_length * 2;
+ fwrt->trans->mac_cfg->base->d3_debug_data_length * 2;
}
/* If we only want a monitor dump, reset the file length */
@@ -904,13 +905,14 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
dump_info->hw_type =
- cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
+ cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev));
dump_info->hw_step =
- cpu_to_le32(fwrt->trans->hw_rev_step);
+ cpu_to_le32(fwrt->trans->info.hw_rev_step);
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
- strscpy_pad(dump_info->dev_human_readable, fwrt->trans->name,
- sizeof(dump_info->dev_human_readable));
+ strscpy_pad(dump_info->dev_human_readable,
+ fwrt->trans->info.name,
+ sizeof(dump_info->dev_human_readable));
strscpy_pad(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable));
dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
@@ -996,7 +998,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
- fwrt->trans->cfg->smem_offset,
+ fwrt->trans->mac_cfg->base->smem_offset,
IWL_FW_ERROR_DUMP_MEM_SMEM);
iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
@@ -1005,8 +1007,8 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
- u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
- size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
+ u32 addr = fwrt->trans->mac_cfg->base->d3_debug_data_base_addr;
+ size_t data_size = fwrt->trans->mac_cfg->base->d3_debug_data_length;
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
dump_data->len = cpu_to_le32(data_size * 2);
@@ -1109,7 +1111,7 @@ static int iwl_dump_ini_prph_phy_iter_common(struct iwl_fw_runtime *fwrt,
range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = size;
- if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
indirect_wr_addr = WMAL_INDRCT_CMD1;
indirect_wr_addr += le32_to_cpu(offset);
@@ -1266,7 +1268,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
/* all paged index start from 1 to skip CSS section */
idx++;
- if (!fwrt->trans->trans_cfg->gen2)
+ if (!fwrt->trans->mac_cfg->gen2)
return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx);
range = range_ptr;
@@ -1790,7 +1792,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id,
data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id,
&addrs->write_ptr);
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
u32 wrt_ptr = le32_to_cpu(data->write_ptr);
data->write_ptr = cpu_to_le32(wrt_ptr >> 2);
@@ -1817,7 +1819,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
- &fwrt->trans->cfg->mon_dram_regs);
+ &fwrt->trans->mac_cfg->base->mon_dram_regs);
}
static void *
@@ -1830,7 +1832,7 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
- &fwrt->trans->cfg->mon_smem_regs);
+ &fwrt->trans->mac_cfg->base->mon_smem_regs);
}
static void *
@@ -1844,7 +1846,7 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
/* no offset calculation later */
IWL_FW_INI_ALLOCATION_ID_DBGC1,
mon_dump,
- &fwrt->trans->cfg->mon_dbgi_regs);
+ &fwrt->trans->mac_cfg->base->mon_dbgi_regs);
}
static void *
@@ -1909,7 +1911,7 @@ iwl_dump_ini_mem_block_ranges(struct iwl_fw_runtime *fwrt,
static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
- if (fwrt->trans->trans_cfg->gen2) {
+ if (fwrt->trans->mac_cfg->gen2) {
if (fwrt->trans->init_dram.paging_cnt)
return fwrt->trans->init_dram.paging_cnt - 1;
else
@@ -2021,7 +2023,7 @@ iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
/* start from 1 to skip CSS section */
for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) {
size += range_header_len;
- if (fwrt->trans->trans_cfg->gen2)
+ if (fwrt->trans->mac_cfg->gen2)
size += fwrt->trans->init_dram.paging[i].size;
else
size += fwrt->fw_paging_db[i].fw_paging_size;
@@ -2375,7 +2377,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_dump_cfg_name *cfg_name;
u32 size = sizeof(*tlv) + sizeof(*dump);
u32 num_of_cfg_names = 0;
- u32 hw_type;
+ u32 hw_type, is_cdb, is_jacket;
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
size += sizeof(*cfg_name);
@@ -2403,33 +2405,24 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);
- dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step);
+ dump->hw_step = cpu_to_le32(fwrt->trans->info.hw_rev_step);
- /*
- * Several HWs all have type == 0x42, so we'll override this value
- * according to the detected HW
- */
- hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
- if (hw_type == IWL_AX210_HW_TYPE) {
- u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
- u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
- u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
- u32 masked_bits = is_jacket | (is_cdb << 1);
+ hw_type = CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev);
+
+ is_cdb = CSR_HW_RFID_IS_CDB(fwrt->trans->info.hw_rf_id);
+ is_jacket = !!(iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR) &
+ WFPM_OTP_CFG1_IS_JACKET_BIT);
+
+ /* Use bits 12 and 13 to indicate jacket/CDB, respectively */
+ hw_type |= (is_jacket | (is_cdb << 1)) << IWL_JACKET_CDB_SHIFT;
- /*
- * The HW type depends on certain bits in this case, so add
- * these bits to the HW type. We won't have collisions since we
- * add these bits after the highest possible bit in the mask.
- */
- hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT;
- }
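	/* e.g. a CDB, non-jacket device with hw_type 0x42 becomes
	 * 0x42 | ((0 | (1 << 1)) << 12) == 0x2042 (illustrative values)
	 */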
dump->hw_type = cpu_to_le32(hw_type);
dump->rf_id_flavor =
- cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id));
- dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id));
- dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id));
- dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id));
+ cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->info.hw_rf_id));
+ dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->info.hw_rf_id));
+ dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->info.hw_rf_id));
+ dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id));
dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major);
dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor);
@@ -2624,6 +2617,12 @@ enum iwl_dump_ini_region_selector {
IWL_INI_DUMP_LATE_REGIONS,
};
+static bool iwl_dump_due_to_error(enum iwl_fw_ini_time_point tp_id)
+{
+ return tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
+ tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR;
+}
+
static u32
iwl_dump_ini_dump_regions(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
@@ -2689,8 +2688,7 @@ iwl_dump_ini_dump_regions(struct iwl_fw_runtime *fwrt,
* debug data which also need to be collected.
*/
if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
- if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
- tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
+ if (iwl_dump_due_to_error(tp_id))
imr_reg_data->reg_tlv =
fwrt->trans->dbg.active_regions[i];
else
@@ -2726,8 +2724,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
ARRAY_SIZE(fwrt->trans->dbg.active_regions));
- if (trigger->time_point &
- cpu_to_le32(IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)) {
+ if (trigger->apply_policy &
+ cpu_to_le32(IWL_FW_INI_APPLY_POLICY_SPLIT_DUMP_RESET)) {
size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_EARLY_REGIONS);
@@ -2736,6 +2734,10 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_LATE_REGIONS);
} else {
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT) &&
+ iwl_dump_due_to_error(tp_id))
+ iwl_trans_pcie_fw_reset_handshake(fwrt->trans);
size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_ALL_REGIONS);
@@ -3282,13 +3284,13 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
{
- const struct iwl_cfg *cfg = fwrt->trans->cfg;
+ const struct iwl_mac_cfg *mac_cfg = fwrt->trans->mac_cfg;
if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
return;
if (!fwrt->dump.d3_debug_data) {
- fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
+ fwrt->dump.d3_debug_data = kmalloc(mac_cfg->base->d3_debug_data_length,
GFP_KERNEL);
if (!fwrt->dump.d3_debug_data) {
IWL_ERR(fwrt,
@@ -3298,15 +3300,15 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
}
/* if the buffer holds previous debug data it is overwritten */
- iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
+ iwl_trans_read_mem_bytes(fwrt->trans, mac_cfg->base->d3_debug_data_base_addr,
fwrt->dump.d3_debug_data,
- cfg->d3_debug_data_length);
+ mac_cfg->base->d3_debug_data_length);
if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx,
- cfg->d3_debug_data_base_addr,
+ mac_cfg->base->d3_debug_data_base_addr,
fwrt->dump.d3_debug_data,
- cfg->d3_debug_data_length);
+ mac_cfg->base->d3_debug_data_length);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
@@ -3341,7 +3343,7 @@ static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
struct iwl_fw_dbg_params *params)
{
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
return;
}
@@ -3365,7 +3367,7 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
if (!params)
return -EIO;
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
@@ -3467,7 +3469,7 @@ void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt)
GENMASK(31, IWL_FW_DBG_DOMAIN_POS + 1));
/* supported starting from 9000 devices */
- if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
return;
if (fwrt->trans->dbg.yoyo_bin_loaded || (preset && preset != 1))
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 35e30e5db462..8034c9ecba69 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -201,7 +201,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
{
return fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
- fwrt->trans->cfg->d3_debug_data_length && fwrt->ops &&
+ fwrt->trans->mac_cfg->base->d3_debug_data_length && fwrt->ops &&
fwrt->ops->d3_debug_enable &&
fwrt->ops->d3_debug_enable(fwrt->ops_ctx) &&
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
@@ -210,7 +210,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
{
return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
- !fwrt->trans->trans_cfg->gen2 &&
+ !fwrt->trans->mac_cfg->gen2 &&
fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index f0c813d675f4..c70f2a20f7d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -311,7 +311,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct iwl_fw_runtime *fwrt,
pos += scnprintf(pos, endpos - pos, "FW: %s\n",
fwrt->fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n",
- fwrt->trans->name);
+ fwrt->trans->info.name);
pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
fwrt->dev->bus->name);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index c7b261c8ec96..3ec42a4ea801 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -417,10 +417,10 @@ static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
struct iwl_trans *trans = fwrt->trans;
u32 error, data1;
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
error = UMAG_SB_CPU_2_STATUS;
data1 = UMAG_SB_CPU_1_STATUS;
- } else if (fwrt->trans->trans_cfg->device_family >=
+ } else if (fwrt->trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
error = SB_CPU_2_STATUS;
data1 = SB_CPU_1_STATUS;
@@ -439,7 +439,7 @@ static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n",
iwl_read_umac_prph(trans, data1));
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
}
@@ -508,7 +508,7 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
iwl_fwrt_dump_rcm_error_log(fwrt, 1);
iwl_fwrt_dump_iml_error_log(fwrt);
iwl_fwrt_dump_fseq_regs(fwrt);
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
pc_data = fwrt->trans->dbg.pc_data;
if (!iwl_trans_grab_nic_access(fwrt->trans))
@@ -522,7 +522,7 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
iwl_trans_release_nic_access(fwrt->trans);
}
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH);
IWL_ERR(fwrt, "Function Scratch status:\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 3af275133da0..cf41021d59ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2014, 2018-2025 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -372,10 +372,7 @@ struct iwl_fw_ini_dump_cfg_name {
u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed;
-/* AX210's HW type */
-#define IWL_AX210_HW_TYPE 0x42
-/* How many bits to roll when adding to the HW type of AX210 HW */
-#define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12
+#define IWL_JACKET_CDB_SHIFT 12
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9860903ecd3f..5a1ec880ed72 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -102,6 +102,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_SEC_TABLE_ADDR = 66,
IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67,
IWL_UCODE_TLV_CURRENT_PC = 68,
+ IWL_UCODE_TLV_FSEQ_BIN_VERSION = 72,
IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
IWL_UCODE_TLV_FW_NUM_LINKS = IWL_UCODE_TLV_CONST_BASE + 1,
@@ -402,6 +403,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA: supports (de)activating 5G9
* for CA from BIOS.
* @IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT: supports %TAS_UHB_ALLOWED_CANADA
+ * @IWL_UCODE_TLV_CAPA_EXT_FSEQ_IMAGE_SUPPORT: external FSEQ image support
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -504,6 +506,14 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122,
IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA = (__force iwl_ucode_tlv_capa_t)123,
IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT = (__force iwl_ucode_tlv_capa_t)124,
+ IWL_UCODE_TLV_CAPA_EXT_FSEQ_IMAGE_SUPPORT = (__force iwl_ucode_tlv_capa_t)125,
+
+ /* set 4 */
+ /**
+ * @IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT: FW reset handshake is needed
+ * during assert handling even if the dump isn't split
+ */
+ IWL_UCODE_TLV_CAPA_RESET_DURING_ASSERT = (__force iwl_ucode_tlv_capa_t)(4 * 32 + 0),
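	/* illustration: 4 * 32 + 0 == capability bit 128, the first bit of capa word 4 */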
NUM_IWL_UCODE_TLV_CAPA
/*
 * This construction makes both sparse (which cannot increment the previous
@@ -994,6 +1004,10 @@ struct iwl_fw_dump_exclude {
__le32 addr, size;
};
+struct iwl_fw_fseq_bin_version {
+ __le32 major, minor;
+}; /* FW_TLV_FSEQ_BIN_VERSION_S */
+
static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
size_t fixed_size, size_t var_size)
{
@@ -1011,4 +1025,18 @@ static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
#define iwl_tlv_array_len_with_size(_tlv_ptr, _struct_ptr, _size) \
_iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), _size)
+
+/* external FSEQ file */
+#define IWL_FSEQ_FILE "intel/fseq-%04x-%04x"
+#define IWL_FSEQ_MAGIC "INTEL-CNV-FSEQ\n\0"
+
+struct iwl_fseq_file {
+ char magic[16];
+ char version[16];
+ __le32 bt_len;
+ __le32 wifi_len;
+ u8 reserved[8];
+ u8 data[];
+} __packed;
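/*
 * Validation sketch for a file in this layout ("fw" is a hypothetical
 * const struct firmware * obtained via request_firmware()):
 *
 *	const struct iwl_fseq_file *fseq = (const void *)fw->data;
 *
 *	if (fw->size < sizeof(*fseq) ||
 *	    memcmp(fseq->magic, IWL_FSEQ_MAGIC, sizeof(fseq->magic)) ||
 *	    fw->size - sizeof(*fseq) < (u64)le32_to_cpu(fseq->bt_len) +
 *				       le32_to_cpu(fseq->wifi_len))
 *		return -EINVAL;
 *
 * The BT image occupies data[0 .. bt_len) and the WiFi image follows
 * immediately at data[bt_len].
 */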
+
#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index de87e0e3e072..d1d8058ad29f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021, 2024 Intel Corporation
+ * Copyright (C) 2019-2021, 2024-2025 Intel Corporation
*/
#include "iwl-drv.h"
#include "runtime.h"
@@ -72,7 +72,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
* values in VER_1, this is backwards-compatible with VER_2,
* as long as we don't set any other bits.
*/
- if (!fwrt->trans->trans_cfg->integrated)
+ if (!fwrt->trans->mac_cfg->integrated)
cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
@@ -84,17 +84,17 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
SOC_FLAGS_LTR_APPLY_DELAY_1820);
- if (fwrt->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
- !WARN_ON(!fwrt->trans->trans_cfg->integrated))
- cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
+ if (fwrt->trans->mac_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
+ !WARN_ON(!fwrt->trans->mac_cfg->integrated))
+ cmd.flags |= le32_encode_bits(fwrt->trans->mac_cfg->ltr_delay,
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
- fwrt->trans->trans_cfg->low_latency_xtal)
+ fwrt->trans->mac_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
- cmd.latency = cpu_to_le32(fwrt->trans->trans_cfg->xtal_latency);
+ cmd.latency = cpu_to_le32(fwrt->trans->mac_cfg->xtal_latency);
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
if (ret)
@@ -116,14 +116,14 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
* The default queue is configured via context info, so if we
* have a single queue, there's nothing to do here.
*/
- if (fwrt->trans->num_rx_queues == 1)
+ if (fwrt->trans->info.num_rxqs == 1)
return 0;
- if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_22000)
return 0;
/* skip the default queue */
- num_queues = fwrt->trans->num_rx_queues - 1;
+ num_queues = fwrt->trans->info.num_rxqs - 1;
size = struct_size(cmd, data, num_queues);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index a7b7cae874a2..826409f6f710 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -267,7 +267,7 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
const struct fw_img *fw = &fwrt->fw->img[type];
int ret;
- if (fwrt->trans->trans_cfg->gen2)
+ if (fwrt->trans->mac_cfg->gen2)
return 0;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 1195e708caa9..4f3c2f7f4f5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2020-2024 Intel Corporation
+ * Copyright(c) 2020-2025 Intel Corporation
*/
#include "iwl-drv.h"
@@ -96,8 +96,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
"Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
mac_type, rf_id);
- if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) &&
- rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id))
+ if (mac_type == CSR_HW_REV_TYPE(trans->info.hw_rev) &&
+ rf_id == CSR_HW_RFID_TYPE(trans->info.hw_rf_id))
hw_match = true;
break;
case IWL_UCODE_TLV_SEC_RT: {
@@ -152,8 +152,8 @@ done:
if (!hw_match) {
IWL_DEBUG_FW(trans,
"HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n",
- CSR_HW_REV_TYPE(trans->hw_rev),
- CSR_HW_RFID_TYPE(trans->hw_rf_id));
+ CSR_HW_REV_TYPE(trans->info.hw_rev),
+ CSR_HW_RFID_TYPE(trans->info.hw_rf_id));
return -ENOENT;
}
@@ -167,7 +167,8 @@ done:
static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
size_t len,
- struct iwl_pnvm_image *pnvm_data)
+ struct iwl_pnvm_image *pnvm_data,
+ __le32 sku_id[3])
{
const struct iwl_ucode_tlv *tlv;
@@ -190,23 +191,23 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- const struct iwl_sku_id *sku_id =
+ const struct iwl_sku_id *tlv_sku_id =
(const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
tlv_len);
IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
- le32_to_cpu(sku_id->data[0]),
- le32_to_cpu(sku_id->data[1]),
- le32_to_cpu(sku_id->data[2]));
+ le32_to_cpu(tlv_sku_id->data[0]),
+ le32_to_cpu(tlv_sku_id->data[1]),
+ le32_to_cpu(tlv_sku_id->data[2]));
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
trans->reduced_cap_sku = false;
- rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
- if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) &&
+ rf_type = CSR_HW_RFID_TYPE(trans->info.hw_rf_id);
+ if ((sku_id[0] & cpu_to_le32(IWL_PNVM_REDUCED_CAP_BIT)) &&
rf_type == IWL_CFG_RF_TYPE_FM)
trans->reduced_cap_sku = true;
@@ -214,9 +215,9 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
"Reduced SKU device %d\n",
trans->reduced_cap_sku);
- if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
- trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
- trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ if (sku_id[0] == tlv_sku_id->data[0] &&
+ sku_id[1] == tlv_sku_id->data[1] &&
+ sku_id[2] == tlv_sku_id->data[2]) {
int ret;
ret = iwl_pnvm_handle_section(trans, data, len,
@@ -263,13 +264,14 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
return 0;
}
-static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
+static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len,
+ __le32 sku_id[3])
{
struct pnvm_sku_package *package;
u8 *image = NULL;
/* Get PNVM from BIOS for non-Intel SKU */
- if (trans_p->sku_id[2]) {
+ if (sku_id[2]) {
package = iwl_uefi_get_pnvm(trans_p, len);
if (!IS_ERR_OR_NULL(package)) {
if (*len >= sizeof(*package)) {
@@ -294,8 +296,10 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
return image;
}
-static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
+static void
+iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa,
+ __le32 sku_id[3])
{
struct iwl_pnvm_image *pnvm_data = NULL;
u8 *data = NULL;
@@ -309,7 +313,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
if (trans->pnvm_loaded)
goto set;
- data = iwl_get_pnvm_image(trans, &length);
+ data = iwl_get_pnvm_image(trans, &length, sku_id);
if (!data) {
trans->fail_to_parse_pnvm_image = true;
return;
@@ -319,7 +323,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
if (!pnvm_data)
goto free;
- ret = iwl_pnvm_parse(trans, data, length, pnvm_data);
+ ret = iwl_pnvm_parse(trans, data, length, pnvm_data, sku_id);
if (ret) {
trans->fail_to_parse_pnvm_image = true;
goto free;
@@ -339,7 +343,8 @@ free:
static void
iwl_pnvm_load_reduce_power_to_trans(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
+ const struct iwl_ucode_capabilities *capa,
+ __le32 sku_id[3])
{
struct iwl_pnvm_image *pnvm_data = NULL;
u8 *data = NULL;
@@ -362,7 +367,8 @@ iwl_pnvm_load_reduce_power_to_trans(struct iwl_trans *trans,
if (!pnvm_data)
goto free;
- ret = iwl_uefi_reduce_power_parse(trans, data, length, pnvm_data);
+ ret = iwl_uefi_reduce_power_parse(trans, data, length, pnvm_data,
+ sku_id);
if (ret) {
trans->failed_to_load_reduce_power_image = true;
goto free;
@@ -386,18 +392,19 @@ free:
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait,
- const struct iwl_ucode_capabilities *capa)
+ const struct iwl_ucode_capabilities *capa,
+ __le32 sku_id[3])
{
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
/* if the SKU_ID is empty, there's nothing to do */
- if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
+ if (!sku_id[0] && !sku_id[1] && !sku_id[2])
return 0;
- iwl_pnvm_load_pnvm_to_trans(trans, capa);
- iwl_pnvm_load_reduce_power_to_trans(trans, capa);
+ iwl_pnvm_load_pnvm_to_trans(trans, capa, sku_id);
+ iwl_pnvm_load_reduce_power_to_trans(trans, capa, sku_id);
iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
index 1bac3466154c..9540926e8a0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2023, 2025 Intel Corporation
*/
#ifndef __IWL_PNVM_H__
#define __IWL_PNVM_H__
@@ -14,7 +14,8 @@
int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait,
- const struct iwl_ucode_capabilities *capa);
+ const struct iwl_ucode_capabilities *capa,
+ __le32 sku_id[3]);
static inline
void iwl_pnvm_get_fs_name(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 6adcfa6e214a..74b90bd92c48 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023, 2025 Intel Corporation
*/
#include <linux/dmi.h>
#include "iwl-drv.h"
@@ -34,6 +34,7 @@ IWL_BIOS_TABLE_LOADER(wrds_table);
IWL_BIOS_TABLE_LOADER(ewrd_table);
IWL_BIOS_TABLE_LOADER(wgds_table);
IWL_BIOS_TABLE_LOADER(ppag_table);
+IWL_BIOS_TABLE_LOADER(phy_filters);
IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
@@ -180,9 +181,9 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
(IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
- fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ fwrt->trans->info.hw_rev != CSR_HW_REV_TYPE_3160) ||
(IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ ((fwrt->trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) ==
CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
@@ -313,7 +314,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
bool send_ppag_always;
/* many firmware images for JF lie about this */
- if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
return -EOPNOTSUPP;
@@ -338,31 +339,35 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
return -EINVAL;
}
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
-
IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
- if (fwrt->ppag_ver >= 1) {
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ if (fwrt->ppag_bios_rev >= 1) {
/* in this case FW supports revision 0 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d, send truncated table\n",
- fwrt->ppag_ver);
+ fwrt->ppag_bios_rev);
}
} else if (cmd_ver >= 2 && cmd_ver <= 6) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v2.gain[0];
*cmd_size = sizeof(cmd->v2);
- if (fwrt->ppag_ver == 0) {
+ cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags);
+ if (fwrt->ppag_bios_rev == 0) {
/* in this case FW supports revisions 1,2 or 3 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is 0, send padded table\n");
}
+ } else if (cmd_ver == 7) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v3.gain[0];
+ *cmd_size = sizeof(cmd->v3);
+ cmd->v3.ppag_config_info.table_source = fwrt->ppag_bios_source;
+ cmd->v3.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
+ cmd->v3.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
return -EINVAL;
@@ -371,9 +376,11 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
/* ppag mode */
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits were read from bios: %d\n",
- le32_to_cpu(cmd->v1.flags));
+ fwrt->ppag_flags);
- if (cmd_ver == 5)
+ if (cmd_ver == 6)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V6_MASK);
+ else if (cmd_ver == 5)
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
else if (cmd_ver < 5)
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
@@ -381,16 +388,20 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
if ((cmd_ver == 1 &&
!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_ver >= 2)) {
+ (cmd_ver == 2 && fwrt->ppag_bios_rev >= 2)) {
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
} else {
IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
}
+ /* The 'flags' field is the same in v1 and v2 so we can just
+ * use v1 to access it.
+ */
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits going to be sent: %d\n",
- le32_to_cpu(cmd->v1.flags));
+ (cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) :
+ le32_to_cpu(cmd->v3.ppag_config_info.value));
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
@@ -480,7 +491,7 @@ __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
u32 val;
__le32 config_bitmap = 0;
- switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)) {
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
case IWL_CFG_RF_TYPE_JF1:
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index 53693314d505..9bed3d573b1e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
*/
#ifndef __fw_regulatory_h__
@@ -224,10 +224,14 @@ int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
- const u8 ppag_ver)
+ const u8 ppag_bios_rev)
{
- return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
- IWL_PPAG_REV3_MASK);
+	/* For revision 4 and above the driver just pipes the BIOS value through */
+ if (ppag_bios_rev >= 4)
+ return ppag_modes;
+
+ return ppag_modes & (ppag_bios_rev < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
+ IWL_PPAG_REV3_MASK);
}
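/* e.g. ppag_bios_rev == 2 keeps only the ETSI/China mode bits,
 * rev 3 applies the wider IWL_PPAG_REV3_MASK, and rev >= 4 forwards
 * the BIOS value to the FW unmodified
 */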
bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc);
@@ -236,22 +240,25 @@ bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc);
#define IWL_DSBR_PERMANENT_URM_MASK BIT(9)
int iwl_bios_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
+int iwl_bios_get_phy_filters(struct iwl_fw_runtime *fwrt);
static inline void iwl_bios_setup_step(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
u32 dsbr;
- if (!trans->trans_cfg->integrated)
+ if (!trans->mac_cfg->integrated)
return;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
return;
if (iwl_bios_get_dsbr(fwrt, &dsbr))
dsbr = 0;
- trans->dsbr_urm_fw_dependent = !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK);
- trans->dsbr_urm_permanent = !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK);
+ trans->conf.dsbr_urm_fw_dependent =
+ !!(dsbr & IWL_DSBR_FW_MODIFIED_URM_MASK);
+ trans->conf.dsbr_urm_permanent =
+ !!(dsbr & IWL_DSBR_PERMANENT_URM_MASK);
}
#endif /* __fw_regulatory_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
index 8f99e501629e..746f2acffb8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2021-2022 Intel Corporation
+ * Copyright (C) 2021-2022, 2025 Intel Corporation
*/
#include <net/mac80211.h>
@@ -91,104 +91,6 @@ const char *iwl_rs_pretty_bw(int bw)
}
IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
-static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
- int idx;
- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
- for (idx = offset; idx < last; idx++)
- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
- return idx - offset;
- return IWL_RATE_INVALID;
-}
-
-u32 iwl_new_rate_from_v1(u32 rate_v1)
-{
- u32 rate_v2 = 0;
- u32 dup = 0;
-
- if (rate_v1 == 0)
- return rate_v1;
- /* convert rate */
- if (rate_v1 & RATE_MCS_HT_MSK_V1) {
- u32 nss = 0;
-
- rate_v2 |= RATE_MCS_HT_MSK;
- rate_v2 |=
- rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
- nss = (rate_v1 & RATE_HT_MCS_MIMO2_MSK) >>
- RATE_HT_MCS_NSS_POS_V1;
- rate_v2 |= nss << RATE_MCS_NSS_POS;
- } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
- rate_v1 & RATE_MCS_HE_MSK_V1) {
- rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
-
- rate_v2 |= rate_v1 & RATE_MCS_NSS_MSK;
-
- if (rate_v1 & RATE_MCS_HE_MSK_V1) {
- u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
- u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
- u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
- RATE_MCS_HE_106T_POS_V1;
- u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
- RATE_MCS_HE_GI_LTF_POS;
-
- if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
- he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
- he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
- /* the new rate have an additional bit to
- * represent the value 4 rather then using SGI
- * bit for this purpose - as it was done in the old
- * rate */
- he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
- RATE_MCS_SGI_POS_V1;
-
- rate_v2 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
- rate_v2 |= he_type << RATE_MCS_HE_TYPE_POS;
- rate_v2 |= he_106t << RATE_MCS_HE_106T_POS;
- rate_v2 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
- rate_v2 |= RATE_MCS_HE_MSK;
- } else {
- rate_v2 |= RATE_MCS_VHT_MSK;
- }
- /* if legacy format */
- } else {
- u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
-
- if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
- legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
- IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
-
- rate_v2 |= legacy_rate;
- if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
- rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
- }
-
- /* convert flags */
- if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
- rate_v2 |= RATE_MCS_LDPC_MSK;
- rate_v2 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
- (rate_v1 & RATE_MCS_ANT_AB_MSK) |
- (rate_v1 & RATE_MCS_STBC_MSK) |
- (rate_v1 & RATE_MCS_BF_MSK);
-
- dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
- if (dup) {
- rate_v2 |= RATE_MCS_DUP_MSK;
- rate_v2 |= dup << RATE_MCS_CHAN_WIDTH_POS;
- }
-
- if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
- (rate_v1 & RATE_MCS_SGI_MSK_V1))
- rate_v2 |= RATE_MCS_SGI_MSK;
-
- return rate_v2;
-}
-IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
-
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type;
@@ -197,37 +99,40 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK) >>
RATE_MCS_CHAN_WIDTH_POS;
u32 format = rate & RATE_MCS_MOD_TYPE_MSK;
+ int index = 0;
bool sgi;
- if (format == RATE_MCS_CCK_MSK ||
- format == RATE_MCS_LEGACY_OFDM_MSK) {
- int legacy_rate = rate & RATE_LEGACY_RATE_MSK;
- int index = format == RATE_MCS_CCK_MSK ?
- legacy_rate :
- legacy_rate + IWL_FIRST_OFDM_RATE;
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_LEGACY_OFDM:
+ index = IWL_FIRST_OFDM_RATE;
+ fallthrough;
+ case RATE_MCS_MOD_TYPE_CCK:
+ index += rate & RATE_LEGACY_RATE_MSK;
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
iwl_rs_pretty_ant(ant),
iwl_rate_mcs(index)->mbps);
- }
-
- if (format == RATE_MCS_VHT_MSK)
+ case RATE_MCS_MOD_TYPE_VHT:
type = "VHT";
- else if (format == RATE_MCS_HT_MSK)
+ break;
+ case RATE_MCS_MOD_TYPE_HT:
type = "HT";
- else if (format == RATE_MCS_HE_MSK)
+ break;
+ case RATE_MCS_MOD_TYPE_HE:
type = "HE";
- else if (format == RATE_MCS_EHT_MSK)
+ break;
+ case RATE_MCS_MOD_TYPE_EHT:
type = "EHT";
- else
+ break;
+ default:
type = "Unknown"; /* shouldn't happen */
+ }
- mcs = format == RATE_MCS_HT_MSK ?
+ mcs = format == RATE_MCS_MOD_TYPE_HT ?
RATE_HT_MCS_INDEX(rate) :
rate & RATE_MCS_CODE_MSK;
- nss = ((rate & RATE_MCS_NSS_MSK)
- >> RATE_MCS_NSS_POS) + 1;
- sgi = format == RATE_MCS_HE_MSK ?
+	nss = u32_get_bits(rate, RATE_MCS_NSS_MSK) + 1;
+ sgi = format == RATE_MCS_MOD_TYPE_HE ?
iwl_he_is_sgi(rate) :
rate & RATE_MCS_SGI_MSK;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index a9e6bba2419e..0444a736c2b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -110,6 +110,9 @@ struct iwl_txf_iter_data {
* Only read the UEFI variables if locked.
* @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables
* @geo_profiles: geographic profiles as read from WGDS BIOS table
+ * @phy_filters: specific phy filters as read from WPFC BIOS table
+ * @ppag_bios_rev: PPAG BIOS revision
+ * @ppag_bios_source: see &enum bios_source
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -178,12 +181,14 @@ struct iwl_fw_runtime {
bool geo_enabled;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
- u8 ppag_ver;
+ u8 ppag_bios_rev;
+ u8 ppag_bios_source;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
struct iwl_mcc_allowed_ap_type_cmd uats_table;
bool uats_valid;
u8 uefi_tables_lock_status;
+ struct iwl_phy_specific_cfg phy_filters;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index 3f1272014daf..90fd69b4860c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -102,7 +102,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
}
pkt = cmd.resp_pkt;
- if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+ if (fwrt->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 386aadbce2a2..48126ec6b94b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -219,7 +219,8 @@ done:
int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len,
- struct iwl_pnvm_image *pnvm_data)
+ struct iwl_pnvm_image *pnvm_data,
+ __le32 sku_id[3])
{
const struct iwl_ucode_tlv *tlv;
@@ -241,23 +242,23 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
}
if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
- const struct iwl_sku_id *sku_id =
+ const struct iwl_sku_id *tlv_sku_id =
(const void *)(data + sizeof(*tlv));
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
tlv_len);
IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
- le32_to_cpu(sku_id->data[0]),
- le32_to_cpu(sku_id->data[1]),
- le32_to_cpu(sku_id->data[2]));
+ le32_to_cpu(tlv_sku_id->data[0]),
+ le32_to_cpu(tlv_sku_id->data[1]),
+ le32_to_cpu(tlv_sku_id->data[2]));
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
- if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
- trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
- trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ if (sku_id[0] == tlv_sku_id->data[0] &&
+ sku_id[1] == tlv_sku_id->data[1] &&
+ sku_id[2] == tlv_sku_id->data[2]) {
int ret = iwl_uefi_reduce_power_section(trans,
data, len,
pnvm_data);
@@ -310,11 +311,12 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat
if (common_step_data->revision != 1)
return -EINVAL;
- trans->mbx_addr_0_step = (u32)common_step_data->revision |
+ trans->conf.mbx_addr_0_step =
+ (u32)common_step_data->revision |
(u32)common_step_data->cnvi_eq_channel << 8 |
(u32)common_step_data->cnvr_eq_channel << 16 |
(u32)common_step_data->radio1 << 24;
- trans->mbx_addr_1_step = (u32)common_step_data->radio2;
+ trans->conf.mbx_addr_1_step = (u32)common_step_data->radio2;
return 0;
}
@@ -323,7 +325,7 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
struct uefi_cnv_common_step_data *data;
int ret;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
data = iwl_uefi_get_verified_variable_guid(trans, &IWL_EFI_WIFI_BT_GUID,
@@ -408,8 +410,8 @@ static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data,
return 0;
}
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt)
+void iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_uats_data *data;
int ret;
@@ -417,17 +419,12 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME,
"UATS", sizeof(*data), NULL);
if (IS_ERR(data))
- return -EINVAL;
+ return;
ret = iwl_uefi_uats_parse(data, fwrt);
- if (ret < 0) {
+ if (ret < 0)
IWL_DEBUG_FW(trans, "Cannot read UATS table. rev is invalid\n");
- kfree(data);
- return ret;
- }
-
kfree(data);
- return 0;
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table);
@@ -557,13 +554,14 @@ int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto out;
}
- fwrt->ppag_ver = data->revision;
+ fwrt->ppag_bios_rev = data->revision;
fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes,
- fwrt->ppag_ver);
+ fwrt->ppag_bios_rev);
BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains));
memcpy(&fwrt->ppag_chains, &data->ppag_chains,
sizeof(data->ppag_chains));
+ fwrt->ppag_bios_source = BIOS_SOURCE_UEFI;
out:
kfree(data);
return ret;
@@ -750,6 +748,10 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
}
*value = data->functions[func];
+
+ IWL_DEBUG_RADIO(fwrt,
+ "UEFI: DSM func=%d: value=%d\n", func, *value);
+
ret = 0;
out:
kfree(data);
@@ -810,3 +812,31 @@ out:
kfree(data);
return ret;
}
+
+int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_wpfc_data *data __free(kfree);
+ struct iwl_phy_specific_cfg *filters = &fwrt->phy_filters;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WPFC_NAME,
+ "WPFC", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != 0) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WPFC revision:%d\n",
+ data->revision);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
+ ARRAY_SIZE(data->chains));
+
+ for (int i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
+ filters->filter_cfg_chains[i] = cpu_to_le32(data->chains[i]);
+ IWL_DEBUG_RADIO(fwrt, "WPFC: chain %d: %u\n", i, data->chains[i]);
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Loaded WPFC config from UEFI\n");
+ return 0;
+}
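iwl_uefi_get_phy_filters() above leans on the scope-based cleanup attribute from <linux/cleanup.h>: a pointer annotated __free(kfree) is freed automatically on every return path, which is why the error returns need no explicit kfree(). A minimal sketch of the pattern, with illustrative names:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_parse(size_t len)
{
	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;		/* kfree(NULL) on exit is harmless */

	/* ... use buf; kfree(buf) is emitted on every return path ... */
	return 0;
}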
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index eb3c05417da3..5a4c557e47c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -24,6 +24,7 @@
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
#define IWL_UEFI_DSBR_NAME L"UefiCnvCommonDSBR"
+#define IWL_UEFI_WPFC_NAME L"WPFC"
#define IWL_SGOM_MAP_SIZE 339
@@ -33,7 +34,7 @@
#define IWL_UEFI_EWRD_REVISION 2
#define IWL_UEFI_WGDS_REVISION 3
#define IWL_UEFI_MIN_PPAG_REV 1
-#define IWL_UEFI_MAX_PPAG_REV 3
+#define IWL_UEFI_MAX_PPAG_REV 4
#define IWL_UEFI_MIN_WTAS_REVISION 1
#define IWL_UEFI_MAX_WTAS_REVISION 2
#define IWL_UEFI_SPLC_REVISION 0
@@ -230,6 +231,18 @@ struct uefi_cnv_wlan_dsbr_data {
u32 config;
} __packed;
+/**
+ * struct uefi_cnv_wpfc_data - BIOS Wi-Fi PHY filter Configuration
+ * @revision: the revision of the table
+ * @chains: configuration of each of the chains (a-d)
+ *
+ * specific PHY filter configuration
+ */
+struct uefi_cnv_wpfc_data {
+ u8 revision;
+ u32 chains[4];
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -240,7 +253,8 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len,
- struct iwl_pnvm_image *pnvm_data);
+ struct iwl_pnvm_image *pnvm_data,
+ __le32 sku_id[3]);
void iwl_uefi_get_step_table(struct iwl_trans *trans);
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data);
@@ -258,10 +272,11 @@ int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt);
+void iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
+int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -271,7 +286,8 @@ static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
static inline int
iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len,
- struct iwl_pnvm_image *pnvm_data)
+ struct iwl_pnvm_image *pnvm_data,
+ __le32 sku_id[3])
{
return -EOPNOTSUPP;
}
@@ -352,11 +368,9 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwr
{
}
-static inline
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt)
+static inline void
+iwl_uefi_get_uats_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
- return 0;
}
static inline
@@ -370,5 +384,10 @@ int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value)
{
return -ENOENT;
}
+
+static inline int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
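The #else branch above is the usual kernel arrangement for optional subsystems: when CONFIG_EFI is disabled, the header supplies static inline stubs with the same signatures, so callers compile unchanged and the stubs fold away. The shape of the pattern, with purely illustrative names:

struct example_ctx;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_get_table(struct example_ctx *ctx);
#else
static inline int example_get_table(struct example_ctx *ctx)
{
	return -ENOENT;		/* feature compiled out */
}
#endif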
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index acafee538b8a..91f22ce36d74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -115,7 +115,29 @@ static inline u8 num_of_ant(u8 mask)
}
/**
- * struct iwl_base_params - params not likely to change within a device family
+ * struct iwl_fw_mon_reg - FW monitor register info
+ * @addr: register address
+ * @mask: register mask
+ */
+struct iwl_fw_mon_reg {
+ u32 addr;
+ u32 mask;
+};
+
+/**
+ * struct iwl_fw_mon_regs - FW monitor registers
+ * @write_ptr: write pointer register
+ * @cycle_cnt: cycle count register
+ * @cur_frag: current fragment in use
+ */
+struct iwl_fw_mon_regs {
+ struct iwl_fw_mon_reg write_ptr;
+ struct iwl_fw_mon_reg cycle_cnt;
+ struct iwl_fw_mon_reg cur_frag;
+};
+
+/**
+ * struct iwl_family_base_params - base parameters for an entire family
* @max_ll_items: max number of OTP blocks
* @shadow_ram_support: shadow support for OTP memory
* @led_compensation: compensate on the led on/off time per HW according
@@ -124,12 +146,34 @@ static inline u8 num_of_ant(u8 mask)
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadow register support
+ * @apmg_not_supported: there's no APMG
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
* @max_tfd_queue_size: max number of entries in tfd queue.
+ * @eeprom_size: EEPROM size
+ * @num_of_queues: number of HW TX queues supported
+ * @pcie_l1_allowed: PCIe L1 state is allowed
+ * @pll_cfg: PLL configuration needed
+ * @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_passlist
+ * @smem_offset: offset from which the SMEM begins
+ * @smem_len: the length of SMEM
+ * @mac_addr_from_csr: read HW address from CSR registers at this offset
+ * @d3_debug_data_base_addr: base address where D3 debug data is stored
+ * @d3_debug_data_length: length of the D3 debug data
+ * @min_ba_txq_size: minimum number of slots required in a TX queue used
+ * for aggregation
+ * @min_txq_size: minimum number of slots required in a TX queue
+ * @gp2_reg_addr: GP2 (timer) register address
+ * @min_umac_error_event_table: minimum SMEM location of UMAC error table
+ * @mon_dbgi_regs: monitor DBGI registers
+ * @mon_dram_regs: monitor DRAM registers
+ * @mon_smem_regs: monitor SMEM registers
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
*/
-struct iwl_base_params {
+struct iwl_family_base_params {
unsigned int wd_timeout;
u16 eeprom_size;
@@ -140,6 +184,7 @@ struct iwl_base_params {
shadow_reg_enable:1,
pcie_l1_allowed:1,
apmg_wake_up_wa:1,
+ apmg_not_supported:1,
scd_chain_ext_wa:1;
u16 num_of_queues; /* def: HW dependent */
@@ -147,6 +192,22 @@ struct iwl_base_params {
u8 max_ll_items;
u8 led_compensation;
+ u8 ucode_api_max;
+ u8 ucode_api_min;
+ u32 mac_addr_from_csr:10;
+ u8 nvm_hw_section_num;
+ netdev_features_t features;
+ u32 smem_offset;
+ u32 smem_len;
+ u32 min_umac_error_event_table;
+ u32 d3_debug_data_base_addr;
+ u32 d3_debug_data_length;
+ u32 min_txq_size;
+ u32 gp2_reg_addr;
+ u32 min_ba_txq_size;
+ const struct iwl_fw_mon_regs mon_dram_regs;
+ const struct iwl_fw_mon_regs mon_smem_regs;
+ const struct iwl_fw_mon_regs mon_dbgi_regs;
};
/*
@@ -238,7 +299,7 @@ struct iwl_pwr_tx_backoff {
u32 backoff;
};
-enum iwl_cfg_trans_ltr_delay {
+enum iwl_mac_cfg_ltr_delay {
IWL_CFG_TRANS_LTR_DELAY_NONE = 0,
IWL_CFG_TRANS_LTR_DELAY_200US = 1,
IWL_CFG_TRANS_LTR_DELAY_2500US = 2,
@@ -246,35 +307,33 @@ enum iwl_cfg_trans_ltr_delay {
};
/**
- * struct iwl_cfg_trans_params - information needed to start the trans
+ * struct iwl_mac_cfg - information about the MAC-specific device part
*
* These values are specific to the device ID and do not change when
* multiple configs are used for a single device ID. The values are
* used, among other things, to boot the NIC so that the HW REV or
* RFID can be read before deciding the remaining parameters to use.
*
- * @base_params: pointer to basic parameters
+ * @base: pointer to basic parameters
* @device_family: the device family
* @umac_prph_offset: offset to add to UMAC periphery address
* @xtal_latency: power up latency to get the xtal stabilized
* @extra_phy_cfg_flags: extra configuration flags to pass to the PHY
- * @rf_id: need to read rf_id to determine the firmware image
* @gen2: 22000 and on transport operation
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
- * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
+ * @ltr_delay: LTR delay parameter, &enum iwl_mac_cfg_ltr_delay.
* @imr_enabled: use the IMR if supported.
*/
-struct iwl_cfg_trans_params {
- const struct iwl_base_params *base_params;
+struct iwl_mac_cfg {
+ const struct iwl_family_base_params *base;
enum iwl_device_family device_family;
u32 umac_prph_offset;
u32 xtal_latency;
u32 extra_phy_cfg_flags;
- u32 rf_id:1,
- gen2:1,
+ u32 gen2:1,
mq_rx_supported:1,
integrated:1,
low_latency_xtal:1,
@@ -283,36 +342,21 @@ struct iwl_cfg_trans_params {
imr_enabled:1;
};
-/**
- * struct iwl_fw_mon_reg - FW monitor register info
- * @addr: register address
- * @mask: register mask
- */
-struct iwl_fw_mon_reg {
- u32 addr;
- u32 mask;
-};
-
-/**
- * struct iwl_fw_mon_regs - FW monitor registers
- * @write_ptr: write pointer register
- * @cycle_cnt: cycle count register
- * @cur_frag: current fragment in use
+/*
+ * These sizes were picked according to 8 MSDUs inside 64/256/512 A-MSDUs
+ * in an A-MPDU, with additional overhead to account for processing time.
+ * They will be doubled for MACs starting from So/Ty that don't support
+ * putting multiple frames into a single buffer.
*/
-struct iwl_fw_mon_regs {
- struct iwl_fw_mon_reg write_ptr;
- struct iwl_fw_mon_reg cycle_cnt;
- struct iwl_fw_mon_reg cur_frag;
-};
+#define IWL_NUM_RBDS_NON_HE (64 * 8)
+#define IWL_NUM_RBDS_HE (256 * 8)
+#define IWL_NUM_RBDS_EHT (512 * 8)
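In concrete numbers, these defines come to 512, 2048 and 4096 receive buffer descriptors (for example IWL_NUM_RBDS_EHT = 512 * 8 = 4096), and per the comment above the counts are doubled on MACs from So/Ty onward that cannot put multiple frames into a single buffer.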
/**
- * struct iwl_cfg
- * @trans: the trans-specific configuration part
- * @name: Official name of the device
+ * struct iwl_rf_cfg
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as <fw_name_pre>-<api>.ucode.
- * @fw_name_mac: MAC name for this config, the remaining pieces of the
- * name will be generated dynamically
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
@@ -323,34 +367,25 @@ struct iwl_fw_mon_regs {
* @non_shared_ant: the antenna that is for WiFi only
* @nvm_ver: NVM version
* @nvm_calib_ver: NVM calibration version
+ * @bw_limit: bandwidth limit for this device, if non-zero
* @ht_params: point to ht parameters
+ * @eeprom_params: EEPROM parameters (old devices)
+ * @thermal_params: Thermal throttling parameters
+ * @lp_xtal_workaround: low-power crystal workaround needed
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @tx_with_siso_diversity: 1x1 device with tx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
- * @high_temp: Is this NIC is designated to be in high temperature.
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
- * @nvm_hw_section_num: the ID of the HW NVM section
- * @mac_addr_from_csr: read HW address from CSR registers at this offset
- * @features: hw features, any combination of feature_passlist
* @pwr_tx_backoffs: translation table between power limits and backoffs
- * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
* @dccm_offset: offset from which DCCM begins
* @dccm_len: length of DCCM (including runtime stack CCM)
* @dccm2_offset: offset from which the second DCCM begins
* @dccm2_len: length of the second DCCM
- * @smem_offset: offset from which the SMEM begins
- * @smem_len: the length of SMEM
* @vht_mu_mimo_supported: VHT MU-MIMO support
- * @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
- * @d3_debug_data_base_addr: base address where D3 debug data is stored
- * @d3_debug_data_length: length of the D3 debug data
- * @min_txq_size: minimum number of slots required in a TX queue
* @uhb_supported: ultra high band channels supported
- * @min_ba_txq_size: minimum number of slots required in a TX queue which
- * based on hardware support (HE - 256, EHT - 1K).
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
*
@@ -358,60 +393,38 @@ struct iwl_fw_mon_regs {
* API differences in uCode shouldn't be handled here but through TLVs
* and/or the uCode API version instead.
*/
-struct iwl_cfg {
- struct iwl_cfg_trans_params trans;
+struct iwl_rf_cfg {
/* params specific to an individual device within a device family */
- const char *name;
const char *fw_name_pre;
- const char *fw_name_mac;
/* params likely to change within a device family */
- const struct iwl_ht_params *ht_params;
+ const struct iwl_ht_params ht_params;
const struct iwl_eeprom_params *eeprom_params;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
- const char *default_nvm_file_C_step;
const struct iwl_tt_params *thermal_params;
enum iwl_led_mode led_mode;
enum iwl_nvm_type nvm_type;
u32 max_data_size;
u32 max_inst_size;
- netdev_features_t features;
u32 dccm_offset;
u32 dccm_len;
u32 dccm2_offset;
u32 dccm2_len;
- u32 smem_offset;
- u32 smem_len;
u16 nvm_ver;
u16 nvm_calib_ver;
+ u16 bw_limit;
u32 rx_with_siso_diversity:1,
tx_with_siso_diversity:1,
internal_wimax_coex:1,
host_interrupt_operation_mode:1,
- high_temp:1,
- mac_addr_from_csr:10,
lp_xtal_workaround:1,
- apmg_not_supported:1,
vht_mu_mimo_supported:1,
- cdb:1,
- dbgc_supported:1,
uhb_supported:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
- u8 nvm_hw_section_num;
- u8 max_tx_agg_size;
u8 ucode_api_max;
u8 ucode_api_min;
u16 num_rbds;
- u32 min_umac_error_event_table;
- u32 d3_debug_data_base_addr;
- u32 d3_debug_data_length;
- u32 min_txq_size;
- u32 gp2_reg_addr;
- u32 min_ba_txq_size;
- const struct iwl_fw_mon_regs mon_dram_regs;
- const struct iwl_fw_mon_regs mon_smem_regs;
- const struct iwl_fw_mon_regs mon_dbgi_regs;
};
#define IWL_CFG_ANY (~0)
@@ -419,8 +432,10 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_PU 0x31
#define IWL_CFG_MAC_TYPE_TH 0x32
#define IWL_CFG_MAC_TYPE_QU 0x33
+#define IWL_CFG_MAC_TYPE_CC 0x34
#define IWL_CFG_MAC_TYPE_QUZ 0x35
#define IWL_CFG_MAC_TYPE_SO 0x37
+#define IWL_CFG_MAC_TYPE_TY 0x42
#define IWL_CFG_MAC_TYPE_SOF 0x43
#define IWL_CFG_MAC_TYPE_MA 0x44
#define IWL_CFG_MAC_TYPE_BZ 0x46
@@ -432,8 +447,6 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_BR 0x4C
#define IWL_CFG_MAC_TYPE_DR 0x4D
-#define IWL_CFG_RF_TYPE_TH 0x105
-#define IWL_CFG_RF_TYPE_TH1 0x108
#define IWL_CFG_RF_TYPE_JF2 0x105
#define IWL_CFG_RF_TYPE_JF1 0x108
#define IWL_CFG_RF_TYPE_HR2 0x10A
@@ -451,12 +464,6 @@ struct iwl_cfg {
#define IWL_CFG_RF_ID_HR 0x7
#define IWL_CFG_RF_ID_HR1 0x4
-#define IWL_CFG_NO_160 0x1
-#define IWL_CFG_160 0x0
-
-#define IWL_CFG_NO_320 0x1
-#define IWL_CFG_320 0x0
-
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -467,55 +474,133 @@ struct iwl_cfg {
#define IWL_CFG_IS_JACKET 0x1
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
+#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
+ const struct iwl_rf_cfg *cfg;
+ const char *name;
u16 device;
u16 subdevice;
- u16 mac_type;
- u16 rf_type;
- u8 mac_step;
- u8 rf_step;
- u8 rf_id;
- u8 no_160;
- u8 cores;
- u8 cdb;
- u8 jacket;
- const struct iwl_cfg *cfg;
- const char *name;
+ u32 subdevice_m_l:4,
+ subdevice_m_h:4,
+ match_rf_type:1,
+ rf_type:9,
+ match_bw_limit:1,
+ bw_limit:1,
+ match_rf_step:1,
+ rf_step:4,
+ match_rf_id:1,
+ rf_id:4,
+ match_cdb:1,
+ cdb:1;
};
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
extern const struct iwl_dev_info iwl_dev_info_table[];
extern const unsigned int iwl_dev_info_table_size;
const struct iwl_dev_info *
-iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
- u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 rf_type, u8 cdb,
+ u8 rf_id, u8 bw_limit, u8 rf_step);
extern const struct pci_device_id iwl_hw_card_ids[];
#endif
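The match_*/value bitfield pairs in struct iwl_dev_info, together with the slimmed-down iwl_pci_find_dev_info() prototype above, suggest a lookup in which a field only constrains the match when its match_ bit is set, letting one table mix exact and wildcard entries. A hypothetical sketch of such a test (not the driver's actual matcher):

static bool example_dev_info_matches(const struct iwl_dev_info *info,
				     u16 rf_type, u8 bw_limit)
{
	if (info->match_rf_type && info->rf_type != rf_type)
		return false;
	if (info->match_bw_limit && info->bw_limit != bw_limit)
		return false;
	/* an entry with no match_ bits set acts as a pure wildcard */
	return true;
}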
/*
* This list declares the config structures for all devices.
*/
-extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_gl_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_dr_trans_cfg;
-extern const struct iwl_cfg_trans_params iwl_br_trans_cfg;
+extern const struct iwl_mac_cfg iwl1000_mac_cfg;
+extern const struct iwl_mac_cfg iwl5000_mac_cfg;
+extern const struct iwl_mac_cfg iwl2000_mac_cfg;
+extern const struct iwl_mac_cfg iwl2030_mac_cfg;
+extern const struct iwl_mac_cfg iwl105_mac_cfg;
+extern const struct iwl_mac_cfg iwl135_mac_cfg;
+extern const struct iwl_mac_cfg iwl5150_mac_cfg;
+extern const struct iwl_mac_cfg iwl6005_mac_cfg;
+extern const struct iwl_mac_cfg iwl6030_mac_cfg;
+extern const struct iwl_mac_cfg iwl6000i_mac_cfg;
+extern const struct iwl_mac_cfg iwl6050_mac_cfg;
+extern const struct iwl_mac_cfg iwl6150_mac_cfg;
+extern const struct iwl_mac_cfg iwl6000_mac_cfg;
+extern const struct iwl_mac_cfg iwl7000_mac_cfg;
+extern const struct iwl_mac_cfg iwl8000_mac_cfg;
+extern const struct iwl_mac_cfg iwl9000_mac_cfg;
+extern const struct iwl_mac_cfg iwl9560_mac_cfg;
+extern const struct iwl_mac_cfg iwl9560_long_latency_mac_cfg;
+extern const struct iwl_mac_cfg iwl9560_shared_clk_mac_cfg;
+extern const struct iwl_mac_cfg iwl_qu_mac_cfg;
+extern const struct iwl_mac_cfg iwl_qu_medium_latency_mac_cfg;
+extern const struct iwl_mac_cfg iwl_qu_long_latency_mac_cfg;
+extern const struct iwl_mac_cfg iwl_ax200_mac_cfg;
+extern const struct iwl_mac_cfg iwl_ty_mac_cfg;
+extern const struct iwl_mac_cfg iwl_so_mac_cfg;
+extern const struct iwl_mac_cfg iwl_so_long_latency_mac_cfg;
+extern const struct iwl_mac_cfg iwl_so_long_latency_imr_mac_cfg;
+extern const struct iwl_mac_cfg iwl_ma_mac_cfg;
+extern const struct iwl_mac_cfg iwl_bz_mac_cfg;
+extern const struct iwl_mac_cfg iwl_gl_mac_cfg;
+extern const struct iwl_mac_cfg iwl_sc_mac_cfg;
+extern const struct iwl_mac_cfg iwl_dr_mac_cfg;
+
+extern const char iwl1000_bgn_name[];
+extern const char iwl1000_bg_name[];
+extern const char iwl100_bgn_name[];
+extern const char iwl100_bg_name[];
+extern const char iwl2000_2bgn_name[];
+extern const char iwl2000_2bgn_d_name[];
+extern const char iwl2030_2bgn_name[];
+extern const char iwl105_bgn_name[];
+extern const char iwl105_bgn_d_name[];
+extern const char iwl135_bgn_name[];
+extern const char iwl5300_agn_name[];
+extern const char iwl5100_bgn_name[];
+extern const char iwl5100_abg_name[];
+extern const char iwl5100_agn_name[];
+extern const char iwl5350_agn_name[];
+extern const char iwl5150_agn_name[];
+extern const char iwl5150_abg_name[];
+extern const char iwl6005_2agn_name[];
+extern const char iwl6005_2abg_name[];
+extern const char iwl6005_2bg_name[];
+extern const char iwl6005_2agn_sff_name[];
+extern const char iwl6005_2agn_d_name[];
+extern const char iwl6005_2agn_mow1_name[];
+extern const char iwl6005_2agn_mow2_name[];
+extern const char iwl6030_2agn_name[];
+extern const char iwl6030_2abg_name[];
+extern const char iwl6030_2bgn_name[];
+extern const char iwl6030_2bg_name[];
+extern const char iwl6035_2agn_name[];
+extern const char iwl6035_2agn_sff_name[];
+extern const char iwl1030_bgn_name[];
+extern const char iwl1030_bg_name[];
+extern const char iwl130_bgn_name[];
+extern const char iwl130_bg_name[];
+extern const char iwl6000i_2agn_name[];
+extern const char iwl6000i_2abg_name[];
+extern const char iwl6000i_2bg_name[];
+extern const char iwl6050_2agn_name[];
+extern const char iwl6050_2abg_name[];
+extern const char iwl6150_bgn_name[];
+extern const char iwl6150_bg_name[];
+extern const char iwl6000_3agn_name[];
+extern const char iwl7260_2ac_name[];
+extern const char iwl7260_2n_name[];
+extern const char iwl7260_n_name[];
+extern const char iwl3160_2ac_name[];
+extern const char iwl3160_2n_name[];
+extern const char iwl3160_n_name[];
+extern const char iwl3165_2ac_name[];
+extern const char iwl3168_2ac_name[];
+extern const char iwl7265_2ac_name[];
+extern const char iwl7265_2n_name[];
+extern const char iwl7265_n_name[];
+extern const char iwl8260_2n_name[];
+extern const char iwl8260_2ac_name[];
+extern const char iwl8265_2ac_name[];
+extern const char iwl8275_2ac_name[];
+extern const char iwl4165_2ac_name[];
+extern const char iwl_killer_1435i_name[];
+extern const char iwl_killer_1434_kix_name[];
extern const char iwl9162_name[];
extern const char iwl9260_name[];
extern const char iwl9260_1_name[];
@@ -548,129 +633,84 @@ extern const char iwl_ax211_killer_1675s_name[];
extern const char iwl_ax211_killer_1675i_name[];
extern const char iwl_ax411_killer_1690s_name[];
extern const char iwl_ax411_killer_1690i_name[];
+extern const char iwl_ax210_name[];
extern const char iwl_ax211_name[];
-extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
-extern const char iwl_fm_name[];
-extern const char iwl_wh_name[];
-extern const char iwl_gl_name[];
-extern const char iwl_mtp_name[];
-extern const char iwl_dr_name[];
-extern const char iwl_br_name[];
+extern const char iwl_killer_be1750s_name[];
+extern const char iwl_killer_be1750i_name[];
+extern const char iwl_killer_be1750w_name[];
+extern const char iwl_killer_be1750x_name[];
+extern const char iwl_killer_be1790s_name[];
+extern const char iwl_killer_be1790i_name[];
+extern const char iwl_be201_name[];
+extern const char iwl_be200_name[];
+extern const char iwl_be202_name[];
+extern const char iwl_be401_name[];
+extern const char iwl_be213_name[];
+extern const char iwl_killer_be1775s_name[];
+extern const char iwl_killer_be1775i_name[];
+extern const char iwl_be211_name[];
+extern const char iwl_killer_bn1850w2_name[];
+extern const char iwl_killer_bn1850i_name[];
+extern const char iwl_bn201_name[];
+extern const char iwl_be221_name[];
+extern const char iwl_be223_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
-extern const struct iwl_cfg iwl5300_agn_cfg;
-extern const struct iwl_cfg iwl5100_agn_cfg;
-extern const struct iwl_cfg iwl5350_agn_cfg;
-extern const struct iwl_cfg iwl5100_bgn_cfg;
-extern const struct iwl_cfg iwl5100_abg_cfg;
-extern const struct iwl_cfg iwl5150_agn_cfg;
-extern const struct iwl_cfg iwl5150_abg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_cfg;
-extern const struct iwl_cfg iwl6005_2abg_cfg;
-extern const struct iwl_cfg iwl6005_2bg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
-extern const struct iwl_cfg iwl6005_2agn_d_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
-extern const struct iwl_cfg iwl1030_bgn_cfg;
-extern const struct iwl_cfg iwl1030_bg_cfg;
-extern const struct iwl_cfg iwl6030_2agn_cfg;
-extern const struct iwl_cfg iwl6030_2abg_cfg;
-extern const struct iwl_cfg iwl6030_2bgn_cfg;
-extern const struct iwl_cfg iwl6030_2bg_cfg;
-extern const struct iwl_cfg iwl6000i_2agn_cfg;
-extern const struct iwl_cfg iwl6000i_2abg_cfg;
-extern const struct iwl_cfg iwl6000i_2bg_cfg;
-extern const struct iwl_cfg iwl6000_3agn_cfg;
-extern const struct iwl_cfg iwl6050_2agn_cfg;
-extern const struct iwl_cfg iwl6050_2abg_cfg;
-extern const struct iwl_cfg iwl6150_bgn_cfg;
-extern const struct iwl_cfg iwl6150_bg_cfg;
-extern const struct iwl_cfg iwl1000_bgn_cfg;
-extern const struct iwl_cfg iwl1000_bg_cfg;
-extern const struct iwl_cfg iwl100_bgn_cfg;
-extern const struct iwl_cfg iwl100_bg_cfg;
-extern const struct iwl_cfg iwl130_bgn_cfg;
-extern const struct iwl_cfg iwl130_bg_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
-extern const struct iwl_cfg iwl2030_2bgn_cfg;
-extern const struct iwl_cfg iwl6035_2agn_cfg;
-extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
-extern const struct iwl_cfg iwl105_bgn_cfg;
-extern const struct iwl_cfg iwl105_bgn_d_cfg;
-extern const struct iwl_cfg iwl135_bgn_cfg;
+extern const struct iwl_rf_cfg iwl5300_agn_cfg;
+extern const struct iwl_rf_cfg iwl5350_agn_cfg;
+extern const struct iwl_rf_cfg iwl5100_n_cfg;
+extern const struct iwl_rf_cfg iwl5100_abg_cfg;
+extern const struct iwl_rf_cfg iwl5150_agn_cfg;
+extern const struct iwl_rf_cfg iwl5150_abg_cfg;
+extern const struct iwl_rf_cfg iwl6005_non_n_cfg;
+extern const struct iwl_rf_cfg iwl6005_n_cfg;
+extern const struct iwl_rf_cfg iwl6030_n_cfg;
+extern const struct iwl_rf_cfg iwl6030_non_n_cfg;
+extern const struct iwl_rf_cfg iwl6000i_2agn_cfg;
+extern const struct iwl_rf_cfg iwl6000i_non_n_cfg;
+extern const struct iwl_rf_cfg iwl6000_3agn_cfg;
+extern const struct iwl_rf_cfg iwl6050_2agn_cfg;
+extern const struct iwl_rf_cfg iwl6050_2abg_cfg;
+extern const struct iwl_rf_cfg iwl6150_bgn_cfg;
+extern const struct iwl_rf_cfg iwl6150_bg_cfg;
+extern const struct iwl_rf_cfg iwl1000_bgn_cfg;
+extern const struct iwl_rf_cfg iwl1000_bg_cfg;
+extern const struct iwl_rf_cfg iwl100_bgn_cfg;
+extern const struct iwl_rf_cfg iwl100_bg_cfg;
+extern const struct iwl_rf_cfg iwl130_bgn_cfg;
+extern const struct iwl_rf_cfg iwl130_bg_cfg;
+extern const struct iwl_rf_cfg iwl2000_2bgn_cfg;
+extern const struct iwl_rf_cfg iwl2030_2bgn_cfg;
+extern const struct iwl_rf_cfg iwl6035_2agn_cfg;
+extern const struct iwl_rf_cfg iwl105_bgn_cfg;
+extern const struct iwl_rf_cfg iwl135_bgn_cfg;
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
-extern const struct iwl_ht_params iwl_22000_ht_params;
-extern const struct iwl_cfg iwl7260_2ac_cfg;
-extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
-extern const struct iwl_cfg iwl7260_2n_cfg;
-extern const struct iwl_cfg iwl7260_n_cfg;
-extern const struct iwl_cfg iwl3160_2ac_cfg;
-extern const struct iwl_cfg iwl3160_2n_cfg;
-extern const struct iwl_cfg iwl3160_n_cfg;
-extern const struct iwl_cfg iwl3165_2ac_cfg;
-extern const struct iwl_cfg iwl3168_2ac_cfg;
-extern const struct iwl_cfg iwl7265_2ac_cfg;
-extern const struct iwl_cfg iwl7265_2n_cfg;
-extern const struct iwl_cfg iwl7265_n_cfg;
-extern const struct iwl_cfg iwl7265d_2ac_cfg;
-extern const struct iwl_cfg iwl7265d_2n_cfg;
-extern const struct iwl_cfg iwl7265d_n_cfg;
-extern const struct iwl_cfg iwl8260_2n_cfg;
-extern const struct iwl_cfg iwl8260_2ac_cfg;
-extern const struct iwl_cfg iwl8265_2ac_cfg;
-extern const struct iwl_cfg iwl8275_2ac_cfg;
-extern const struct iwl_cfg iwl4165_2ac_cfg;
-extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg;
-extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg;
-extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg;
-extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
-extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
-extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
-extern const struct iwl_cfg iwl_qu_b0_hr_b0;
-extern const struct iwl_cfg iwl_qu_c0_hr_b0;
-extern const struct iwl_cfg iwl_ax200_cfg_cc;
-extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
-extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
-extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
-extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
-extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
-extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
-extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
-extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
-extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
-extern const struct iwl_cfg killer1650x_2ax_cfg;
-extern const struct iwl_cfg killer1650w_2ax_cfg;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0;
-extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
-extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
-extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
-extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
-extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
-
-extern const struct iwl_cfg iwl_cfg_ma;
-
-extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
-extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
+extern const struct iwl_rf_cfg iwl7260_cfg;
+extern const struct iwl_rf_cfg iwl7260_high_temp_cfg;
+extern const struct iwl_rf_cfg iwl3160_cfg;
+extern const struct iwl_rf_cfg iwl3165_2ac_cfg;
+extern const struct iwl_rf_cfg iwl3168_2ac_cfg;
+extern const struct iwl_rf_cfg iwl7265_cfg;
+extern const struct iwl_rf_cfg iwl7265d_cfg;
+extern const struct iwl_rf_cfg iwl8260_cfg;
+extern const struct iwl_rf_cfg iwl8265_cfg;
+extern const struct iwl_rf_cfg iwl_rf_jf;
+extern const struct iwl_rf_cfg iwl_rf_jf_80mhz;
+extern const struct iwl_rf_cfg iwl_rf_hr1;
+extern const struct iwl_rf_cfg iwl_rf_hr;
+extern const struct iwl_rf_cfg iwl_rf_hr_80mhz;
+
+extern const struct iwl_rf_cfg iwl_rf_gf;
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
-extern const struct iwl_ht_params iwl_bz_ht_params;
-
-extern const struct iwl_ht_params iwl_bz_ht_params;
-
-extern const struct iwl_cfg iwl_cfg_bz;
-extern const struct iwl_cfg iwl_cfg_gl;
-
-extern const struct iwl_cfg iwl_cfg_sc;
-extern const struct iwl_cfg iwl_cfg_sc2;
-extern const struct iwl_cfg iwl_cfg_sc2f;
-extern const struct iwl_cfg iwl_cfg_dr;
-extern const struct iwl_cfg iwl_cfg_br;
+extern const struct iwl_rf_cfg iwl_rf_fm;
+extern const struct iwl_rf_cfg iwl_rf_fm_160mhz;
+#define iwl_rf_wh iwl_rf_fm
+#define iwl_rf_wh_160mhz iwl_rf_fm_160mhz
+#define iwl_rf_pe iwl_rf_fm
#endif /* CONFIG_IWLMLD */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h
index 20563a32a21a..8c5c0ea46181 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h
@@ -2,8 +2,8 @@
/*
* Copyright (C) 2018, 2020-2025 Intel Corporation
*/
-#ifndef __iwl_context_info_file_gen3_h__
-#define __iwl_context_info_file_gen3_h__
+#ifndef __iwl_context_info_file_v2_h__
+#define __iwl_context_info_file_v2_h__
#include "iwl-context-info.h"
@@ -58,6 +58,7 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
* @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: Indicate fw to set SCU_FORCE_ACTIVE
* upon reset.
+ * @IWL_PRPH_SCRATCH_TOP_RESET: request TOP reset
*/
enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
@@ -74,15 +75,18 @@ enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20,
IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
+ IWL_PRPH_SCRATCH_TOP_RESET = BIT(30),
};
/**
* enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
+ * @IWL_PRPH_SCRATCH_EXT_EXT_FSEQ: external FSEQ image provided
* @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting
* @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode
* @IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID: use external 32 KHz clock
*/
enum iwl_prph_scratch_ext_flags {
+ IWL_PRPH_SCRATCH_EXT_EXT_FSEQ = BIT(0),
IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID = BIT(8),
@@ -202,6 +206,19 @@ struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_step_cfg step_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+#define IWL_NUM_DRAM_FSEQ_ENTRIES 8
+
+/**
+ * struct iwl_context_info_dram_fseq - images DRAM map (with fseq)
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @common: UMAC/LMAC/virtual images
+ * @fseq_img: FSEQ image DRAM map
+ */
+struct iwl_context_info_dram_fseq {
+ struct iwl_context_info_dram_nonfseq common;
+ __le64 fseq_img[IWL_NUM_DRAM_FSEQ_ENTRIES];
+} __packed; /* PERIPH_SCRATCH_DRAM_MAP_S */
+
/**
* struct iwl_prph_scratch - peripheral scratch mapping
* @ctrl_cfg: control and configuration of prph scratch
@@ -215,7 +232,7 @@ struct iwl_prph_scratch {
__le32 fseq_override;
__le32 step_analog_params;
__le32 reserved[8];
- struct iwl_context_info_dram dram;
+ struct iwl_context_info_dram_fseq dram;
} __packed; /* PERIPH_SCRATCH_S */
/**
@@ -233,7 +250,7 @@ struct iwl_prph_info {
} __packed; /* PERIPH_INFO_S */
/**
- * struct iwl_context_info_gen3 - device INIT configuration
+ * struct iwl_context_info_v2 - device INIT configuration
* @version: version of the context information
* @size: size of context information in DWs
* @config: context in which the peripheral would execute - a subset of
@@ -276,7 +293,7 @@ struct iwl_prph_info {
* @prph_scratch_size: the size of the peripheral scratch structure in DWs
* @reserved: reserved
*/
-struct iwl_context_info_gen3 {
+struct iwl_context_info_v2 {
__le16 version;
__le16 size;
__le32 config;
@@ -306,22 +323,22 @@ struct iwl_context_info_gen3 {
__le32 reserved;
} __packed; /* IPC_CONTEXT_INFO_S */
-int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
- const struct fw_img *fw);
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);
+int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img);
+void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive);
-int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa);
-void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
+int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_payloads,
+ const struct iwl_ucode_capabilities *capa);
+void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
int
-iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa);
+iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
void
-iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
-int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans,
- u32 mbx_addr_0_step, u32 mbx_addr_1_step);
-#endif /* __iwl_context_info_file_gen3_h__ */
+iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
+#endif /* __iwl_context_info_file_v2_h__ */
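struct iwl_context_info_dram_fseq above extends the older DRAM map by embedding it as the first member, so the common part keeps its offsets and only the FSEQ entries are appended. The pattern in miniature, with illustrative types:

struct example_map_v1 {
	__le64 img[4];
} __packed;

struct example_map_v2 {
	struct example_map_v1 common;	/* layout-compatible prefix */
	__le64 fseq[2];			/* appended entries only */
} __packed;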
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index dfd44fabf237..7ae0fbdef208 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2022, 2024 Intel Corporation
+ * Copyright (C) 2018-2020, 2022, 2024-2025 Intel Corporation
*/
#ifndef __iwl_context_info_file_h__
#define __iwl_context_info_file_h__
@@ -78,13 +78,13 @@ struct iwl_context_info_control {
} __packed;
/**
- * struct iwl_context_info_dram - images DRAM map
+ * struct iwl_context_info_dram_nonfseq - images DRAM map
* each entry in the map represents a DRAM chunk of up to 32 KB
* @umac_img: UMAC image DRAM map
* @lmac_img: LMAC image DRAM map
* @virtual_img: paged image DRAM map
*/
-struct iwl_context_info_dram {
+struct iwl_context_info_dram_nonfseq {
__le64 umac_img[IWL_MAX_DRAM_ENTRY];
__le64 lmac_img[IWL_MAX_DRAM_ENTRY];
__le64 virtual_img[IWL_MAX_DRAM_ENTRY];
@@ -177,16 +177,16 @@ struct iwl_context_info {
struct iwl_context_info_early_dbg_cfg edbg_cfg;
struct iwl_context_info_pnvm_cfg pnvm_cfg;
__le32 reserved2[16];
- struct iwl_context_info_dram dram;
+ struct iwl_context_info_dram_nonfseq dram;
__le32 reserved3[16];
-} __packed;
+} __packed; /* BOOT_LOADER_CONTEXT_INFO_S */
-int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *img);
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
- struct iwl_context_info_dram *ctxt_dram);
+ struct iwl_context_info_dram_nonfseq *ctxt_dram);
void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
size_t size,
dma_addr_t *phys);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 3ff493e920d2..0fd452cb94ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -107,6 +107,13 @@
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
+#define CSR_IPC_STATE (CSR_BASE + 0x110)
+#define CSR_IPC_STATE_RESET 0x00000030
+#define CSR_IPC_STATE_RESET_NONE 0
+#define CSR_IPC_STATE_RESET_SW_READY 1
+#define CSR_IPC_STATE_RESET_TOP_READY 2
+#define CSR_IPC_STATE_RESET_TOP_FOLLOWER 3
+
#define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114)
#define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3
#define CSR_IPC_SLEEP_CONTROL_RESUME 0
@@ -194,17 +201,19 @@
#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
+#define CSR_INT_BIT_RESET_DONE (1 << 2) /* reset handshake with firmware is done */
#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
-#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
- CSR_INT_BIT_HW_ERR | \
- CSR_INT_BIT_FH_TX | \
- CSR_INT_BIT_SW_ERR | \
- CSR_INT_BIT_RF_KILL | \
- CSR_INT_BIT_SW_RX | \
- CSR_INT_BIT_WAKEUP | \
- CSR_INT_BIT_ALIVE | \
+#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
+ CSR_INT_BIT_HW_ERR | \
+ CSR_INT_BIT_FH_TX | \
+ CSR_INT_BIT_SW_ERR | \
+ CSR_INT_BIT_RF_KILL | \
+ CSR_INT_BIT_SW_RX | \
+ CSR_INT_BIT_WAKEUP | \
+ CSR_INT_BIT_RESET_DONE | \
+ CSR_INT_BIT_ALIVE | \
CSR_INT_BIT_RX_PERIODIC)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
@@ -641,7 +650,7 @@ enum msix_hw_int_causes {
* HW address related registers *
*****************************************************************************/
-#define CSR_ADDR_BASE(trans) ((trans)->cfg->mac_addr_from_csr)
+#define CSR_ADDR_BASE(trans) ((trans)->mac_cfg->base->mac_addr_from_csr)
#define CSR_MAC_ADDR0_OTP(trans) (CSR_ADDR_BASE(trans) + 0x00)
#define CSR_MAC_ADDR1_OTP(trans) (CSR_ADDR_BASE(trans) + 0x04)
#define CSR_MAC_ADDR0_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x08)
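The new CSR_IPC_STATE_RESET values all fit inside the 0x00000030 mask (bits 4-5), so the reset-handshake state can be pulled out with a masked read. A sketch, assuming the driver's usual iwl_read32() CSR accessor:

#include <linux/bitfield.h>

static u32 example_ipc_reset_state(struct iwl_trans *trans)
{
	/* yields CSR_IPC_STATE_RESET_NONE..TOP_FOLLOWER, i.e. 0..3 */
	return u32_get_bits(iwl_read32(trans, CSR_IPC_STATE),
			    CSR_IPC_STATE_RESET);
}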
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index ce787326aa69..5c8f6dc9a3e0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -503,7 +503,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
int res;
if (!iwlwifi_mod_params.enable_ini ||
- trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
+ trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
return;
res = firmware_request_nowarn(&fw, yoyo_bin, dev);
@@ -603,11 +603,11 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
return 0;
num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
- if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
- } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
+ } else if (fwrt->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) {
return -EIO;
}
@@ -1241,7 +1241,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
fwrt->trans->dbg.restart_required = false;
- if (fwrt->trans->trans_cfg->device_family ==
+ if (fwrt->trans->mac_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
fwrt->trans->dbg.restart_required = true;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index 76166e1b10e5..99789c7cef3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(C) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018, 2023 Intel Corporation
+ * Copyright(c) 2018, 2023, 2025 Intel Corporation
*****************************************************************************/
#ifndef __IWLWIFI_DEVICE_TRACE
@@ -54,11 +54,11 @@ static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
struct ieee80211_hdr *hdr = NULL;
size_t hdr_offset;
- if (cmd->cmd != trans->rx_mpdu_cmd)
+ if (cmd->cmd != trans->conf.rx_mpdu_cmd)
return len;
hdr_offset = sizeof(struct iwl_cmd_header) +
- trans->rx_mpdu_cmd_hdr_size;
+ trans->conf.rx_mpdu_cmd_hdr_size;
if (out_hdr_offset)
*out_hdr_offset = hdr_offset;
@@ -67,7 +67,8 @@ static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
if (!ieee80211_is_data(hdr->frame_control))
return len;
/* maybe try to identify EAPOL frames? */
- return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
+ return sizeof(__le32) + sizeof(*cmd) +
+ trans->conf.rx_mpdu_cmd_hdr_size +
ieee80211_hdrlen(hdr->frame_control);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index d36837501e08..9504a0cb8b13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -176,25 +176,86 @@ static inline char iwl_drv_get_step(int step)
static bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans)
{
- return CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM;
+ return CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM;
}
const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
{
char mac_step, rf_step;
- const char *rf, *cdb;
+ const char *mac, *rf, *cdb;
if (trans->cfg->fw_name_pre)
return trans->cfg->fw_name_pre;
- if (WARN_ON(!trans->cfg->fw_name_mac))
- return "unconfigured";
+ mac_step = iwl_drv_get_step(trans->info.hw_rev_step);
- mac_step = iwl_drv_get_step(trans->hw_rev_step);
+ switch (CSR_HW_REV_TYPE(trans->info.hw_rev)) {
+ case IWL_CFG_MAC_TYPE_PU:
+ mac = "9000-pu";
+ mac_step = 'b';
+ break;
+ case IWL_CFG_MAC_TYPE_TH:
+ mac = "9260-th";
+ mac_step = 'b';
+ break;
+ case IWL_CFG_MAC_TYPE_QU:
+ mac = "Qu";
+ break;
+ case IWL_CFG_MAC_TYPE_CC:
+ /* special case - no RF since it's fixed (discrete) */
+ scnprintf(buf, FW_NAME_PRE_BUFSIZE, "iwlwifi-cc-a0");
+ return buf;
+ case IWL_CFG_MAC_TYPE_QUZ:
+ mac = "QuZ";
+ /* all QuZ use A0 firmware */
+ mac_step = 'a';
+ break;
+ case IWL_CFG_MAC_TYPE_SO:
+ case IWL_CFG_MAC_TYPE_SOF:
+ mac = "so";
+ mac_step = 'a';
+ break;
+ case IWL_CFG_MAC_TYPE_TY:
+ mac = "ty";
+ mac_step = 'a';
+ break;
+ case IWL_CFG_MAC_TYPE_MA:
+ mac = "ma";
+ break;
+ case IWL_CFG_MAC_TYPE_BZ:
+ case IWL_CFG_MAC_TYPE_BZ_W:
+ mac = "bz";
+ break;
+ case IWL_CFG_MAC_TYPE_GL:
+ mac = "gl";
+ break;
+ case IWL_CFG_MAC_TYPE_SC:
+ mac = "sc";
+ break;
+ case IWL_CFG_MAC_TYPE_SC2:
+ mac = "sc2";
+ break;
+ case IWL_CFG_MAC_TYPE_SC2F:
+ mac = "sc2f";
+ break;
+ case IWL_CFG_MAC_TYPE_BR:
+ mac = "br";
+ break;
+ case IWL_CFG_MAC_TYPE_DR:
+ mac = "dr";
+ break;
+ default:
+ return "unknown-mac";
+ }
- rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id));
+ rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->info.hw_rf_id));
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_JF1:
+ case IWL_CFG_RF_TYPE_JF2:
+ rf = "jf";
+ rf_step = 'b';
+ break;
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
rf = "hr";
@@ -202,29 +263,26 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
break;
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
+ rf_step = 'a';
break;
case IWL_CFG_RF_TYPE_FM:
rf = "fm";
break;
case IWL_CFG_RF_TYPE_WH:
- if (SILICON_Z_STEP ==
- CSR_HW_RFID_STEP(trans->hw_rf_id)) {
- rf = "whtc";
- rf_step = 'a';
- } else {
- rf = "wh";
- }
+ rf = "wh";
+ break;
+ case IWL_CFG_RF_TYPE_PE:
+ rf = "pe";
break;
default:
return "unknown-rf";
}
- cdb = CSR_HW_RFID_IS_CDB(trans->hw_rf_id) ? "4" : "";
+ cdb = CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id) ? "4" : "";
scnprintf(buf, FW_NAME_PRE_BUFSIZE,
"iwlwifi-%s-%c0-%s%s-%c0",
- trans->cfg->fw_name_mac, mac_step,
- rf, cdb, rf_step);
+ mac, mac_step, rf, cdb, rf_step);
return buf;
}
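Worked example of the scnprintf() format just above: mac "so" with mac_step 'a', rf "gf" with rf_step 'a' on a CDB part (cdb = "4") yields "iwlwifi-so-a0-gf4-a0"; the API version and the ".ucode" suffix are appended later when the firmware file is requested.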
@@ -233,39 +291,64 @@ IWL_EXPORT_SYMBOL(iwl_drv_get_fwname_pre);
static void iwl_req_fw_callback(const struct firmware *ucode_raw,
void *context);
+static void iwl_get_ucode_api_versions(struct iwl_trans *trans,
+ unsigned int *api_min,
+ unsigned int *api_max)
+{
+ const struct iwl_family_base_params *base = trans->mac_cfg->base;
+ const struct iwl_rf_cfg *cfg = trans->cfg;
+
+ if (!base->ucode_api_max) {
+ *api_min = cfg->ucode_api_min;
+ *api_max = cfg->ucode_api_max;
+ return;
+ }
+
+ if (!cfg->ucode_api_max) {
+ *api_min = base->ucode_api_min;
+ *api_max = base->ucode_api_max;
+ return;
+ }
+
+ *api_min = max(cfg->ucode_api_min, base->ucode_api_min);
+ *api_max = min(cfg->ucode_api_max, base->ucode_api_max);
+}
+
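For instance, with a hypothetical family base range of 77..99 and an RF config range of 72..96, the helper above returns the intersection 77..96; when either side leaves its range unset (a zero ucode_api_max), the other side's range is used as-is.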
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
- const struct iwl_cfg *cfg = drv->trans->cfg;
char _fw_name_pre[FW_NAME_PRE_BUFSIZE];
+ unsigned int ucode_api_max, ucode_api_min;
const char *fw_name_pre;
- if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
- (drv->trans->hw_rev_step != SILICON_B_STEP &&
- drv->trans->hw_rev_step != SILICON_C_STEP)) {
+ iwl_get_ucode_api_versions(drv->trans, &ucode_api_min, &ucode_api_max);
+
+ if (drv->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+ (drv->trans->info.hw_rev_step != SILICON_B_STEP &&
+ drv->trans->info.hw_rev_step != SILICON_C_STEP)) {
IWL_ERR(drv,
"Only HW steps B and C are currently supported (0x%0x)\n",
- drv->trans->hw_rev);
+ drv->trans->info.hw_rev);
return -EINVAL;
}
fw_name_pre = iwl_drv_get_fwname_pre(drv->trans, _fw_name_pre);
if (first)
- drv->fw_index = cfg->ucode_api_max;
+ drv->fw_index = ucode_api_max;
else
drv->fw_index--;
- if (drv->fw_index < cfg->ucode_api_min) {
+ if (drv->fw_index < ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
- if (cfg->ucode_api_min == cfg->ucode_api_max) {
+ if (ucode_api_min == ucode_api_max) {
IWL_ERR(drv, "%s-%d is required\n", fw_name_pre,
- cfg->ucode_api_max);
+ ucode_api_max);
} else {
IWL_ERR(drv, "minimum version required: %s-%d\n",
- fw_name_pre, cfg->ucode_api_min);
+ fw_name_pre, ucode_api_min);
IWL_ERR(drv, "maximum version supported: %s-%d\n",
- fw_name_pre, cfg->ucode_api_max);
+ fw_name_pre, ucode_api_max);
}
IWL_ERR(drv,
@@ -1235,7 +1318,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
- if (drv->trans->trans_cfg->device_family <
+ if (drv->trans->mac_cfg->device_family <
IWL_DEVICE_FAMILY_22000)
break;
drv->trans->dbg.umac_error_event_table =
@@ -1251,7 +1334,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*dbg_ptrs))
goto invalid_tlv_len;
- if (drv->trans->trans_cfg->device_family <
+ if (drv->trans->mac_cfg->device_family <
IWL_DEVICE_FAMILY_22000)
break;
drv->trans->dbg.lmac_error_event_table[0] =
@@ -1373,7 +1456,7 @@ static int iwl_alloc_ucode(struct iwl_drv *drv,
static int validate_sec_sizes(struct iwl_drv *drv,
struct iwl_firmware_pieces *pieces,
- const struct iwl_cfg *cfg)
+ const struct iwl_rf_cfg *cfg)
{
IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n",
get_sec_size(pieces, IWL_UCODE_REGULAR,
@@ -1496,14 +1579,15 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces *pieces;
- const unsigned int api_max = drv->trans->cfg->ucode_api_max;
- const unsigned int api_min = drv->trans->cfg->ucode_api_min;
+ unsigned int api_min, api_max;
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
int i;
bool usniffer_images = false;
bool failure = true;
+ iwl_get_ucode_api_versions(drv->trans, &api_min, &api_max);
+
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
@@ -1697,14 +1781,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
else
fw->init_evtlog_size =
- drv->trans->trans_cfg->base_params->max_event_log_size;
+ drv->trans->mac_cfg->base->max_event_log_size;
fw->init_errlog_ptr = pieces->init_errlog_ptr;
fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
if (pieces->inst_evtlog_size)
fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
else
fw->inst_evtlog_size =
- drv->trans->trans_cfg->base_params->max_event_log_size;
+ drv->trans->mac_cfg->base->max_event_log_size;
fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 854957bdf79d..595300a14639 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2020-2021, 2023, 2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#ifndef __iwl_drv_h__
@@ -53,7 +53,7 @@
struct iwl_drv;
struct iwl_trans;
-struct iwl_cfg;
+struct iwl_rf_cfg;
/**
* iwl_drv_start - start the drv
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 5c8f1868db64..0f6de08b7473 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023-2025 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -71,7 +71,7 @@
static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
unsigned int chnl)
{
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
WARN_ON_ONCE(chnl >= 64);
return TFH_TFDQ_CBB_TABLE + 8 * chnl;
}
@@ -378,14 +378,14 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
*/
#define RFH_GEN_STATUS 0xA09808
-#define RFH_GEN_STATUS_GEN3 0xA07824
+#define RFH_GEN_STATUS_AX210 0xA07824
#define RBD_FETCH_IDLE BIT(29)
#define SRAM_DMA_IDLE BIT(30)
#define RXF_DMA_IDLE BIT(31)
/* DMA configuration */
#define RFH_RXF_DMA_CFG 0xA09820
-#define RFH_RXF_DMA_CFG_GEN3 0xA07880
+#define RFH_RXF_DMA_CFG_AX210 0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS 16
@@ -588,13 +588,12 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
-#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
-#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
+#define TFD_QUEUE_BC_SIZE_AX210 1024
+#define TFD_QUEUE_BC_SIZE_BZ (1024 * 4)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
@@ -717,30 +716,19 @@ struct iwl_tfh_tfd {
/* Fixed (non-configurable) rx data from phy */
/**
- * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ * struct iwl_bc_tbl_entry - scheduler byte count table entry
* base physical address provided by SCD_DRAM_BASE_ADDR
* @tfd_offset:
* For devices up to 22000:
* 0-12 - tx command byte count
* 12-16 - station index
- * For 22000:
+ * For 22000 and on:
* 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
-struct iwlagn_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-} __packed;
-
-/**
- * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
- * For AX210 and on:
- * @tfd_offset: 0-12 - tx command byte count
- * 12-13 - number of 64 byte chunks
- * 14-16 - reserved
- */
-struct iwl_gen3_bc_tbl_entry {
+struct iwl_bc_tbl_entry {
__le16 tfd_offset;
} __packed;
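With struct iwlagn_scd_bc_tbl and the gen3-only entry type folded into a single struct iwl_bc_tbl_entry, only the table length still differs per device family. A hedged sketch of how a caller might size the table from the constants above (helper name hypothetical):

static size_t bc_tbl_n_entries_sketch(const struct iwl_trans *trans)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		return TFD_QUEUE_BC_SIZE_BZ;
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return TFD_QUEUE_BC_SIZE_AX210;
	return TFD_QUEUE_BC_SIZE;	/* pre-AX210 fixed per-queue size */
}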
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 0653ca8b974a..80591809164e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
#include <linux/delay.h>
@@ -211,13 +211,13 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
void iwl_force_nmi(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
DEVICE_SET_NMI_VAL_DRV, 1);
- else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
- else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_NMI_BIT);
else
@@ -260,7 +260,7 @@ struct reg {
static int iwl_dump_rfh(struct iwl_trans *trans, char **buf)
{
int i, q;
- int num_q = trans->num_rx_queues;
+ int num_q = trans->info.num_rxqs;
static const u32 rfh_tbl[] = {
RFH_RXF_DMA_CFG,
RFH_GEN_CFG,
@@ -368,7 +368,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
FH_TSSR_TX_ERROR_REG
};
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
return iwl_dump_rfh(trans, buf);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -423,7 +423,7 @@ static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
static void iwl_dump_host_monitor(struct iwl_trans *trans)
{
- switch (trans->trans_cfg->device_family) {
+ switch (trans->mac_cfg->device_family) {
case IWL_DEVICE_FAMILY_22000:
case IWL_DEVICE_FAMILY_AX210:
IWL_ERR(trans, "CSR_RESET = 0x%x\n",
@@ -445,11 +445,11 @@ static void iwl_dump_host_monitor(struct iwl_trans *trans)
int iwl_finish_nic_init(struct iwl_trans *trans)
{
- const struct iwl_cfg_trans_params *cfg_trans = trans->trans_cfg;
+ const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
u32 poll_ready;
int err;
- if (cfg_trans->bisr_workaround) {
+ if (mac_cfg->bisr_workaround) {
/* ensure the TOP FSM isn't still in previous reset */
mdelay(2);
}
@@ -458,7 +458,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
- if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
@@ -469,7 +469,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
}
- if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000)
+ if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
/*
@@ -484,7 +484,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
iwl_dump_host_monitor(trans);
}
- if (cfg_trans->bisr_workaround) {
+ if (mac_cfg->bisr_workaround) {
/* ensure BISR shift has finished */
udelay(200);
}
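Note how iwl_dump_rfh() above now reads the RX queue count from the immutable trans->info block instead of a mutable transport field. The loop shape, as an isolated sketch (the debug print stands in for the per-queue register reads):

static void dump_rx_queues_sketch(struct iwl_trans *trans)
{
	int q;

	/* trans->info.num_rxqs replaces the old trans->num_rx_queues */
	for (q = 0; q < trans->info.num_rxqs; q++)
		IWL_ERR(trans, "RX queue %d\n", q);
}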
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 37b3bd62897e..f4833c5fe86e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2021, 2025 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
@@ -64,38 +64,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf);
*/
static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return ofs + trans->trans_cfg->umac_prph_offset;
+ return ofs + trans->mac_cfg->umac_prph_offset;
}
static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
{
return iwl_read_prph_no_grab(trans, ofs +
- trans->trans_cfg->umac_prph_offset);
+ trans->mac_cfg->umac_prph_offset);
}
static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
{
- return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset);
+ return iwl_read_prph(trans, ofs + trans->mac_cfg->umac_prph_offset);
}
static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph_no_grab(trans, ofs + trans->trans_cfg->umac_prph_offset,
+ iwl_write_prph_no_grab(trans, ofs + trans->mac_cfg->umac_prph_offset,
val);
}
static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
u32 val)
{
- iwl_write_prph(trans, ofs + trans->trans_cfg->umac_prph_offset, val);
+ iwl_write_prph(trans, ofs + trans->mac_cfg->umac_prph_offset, val);
}
static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
{
return iwl_poll_prph_bit(trans, addr +
- trans->trans_cfg->umac_prph_offset,
+ trans->mac_cfg->umac_prph_offset,
bits, mask, timeout);
}
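All the accessors above share one pattern: callers pass a logical UMAC address and the per-MAC offset is applied underneath. The two calls below are therefore equivalent (register names reused from the NMI hunk earlier, purely for illustration):

iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
		    UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);

iwl_write_prph(trans,
	       UREG_NIC_SET_NMI_DRIVER + trans->mac_cfg->umac_prph_offset,
	       UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);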
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index c381511e9ec6..0592f0f59d1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -143,6 +143,9 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
* @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
* when %IWL_NVM_SBANDS_FLAGS_LAR is enabled.
+ * @NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY: active scanning allowed and
+ * AP allowed only in 20 MHz. Valid only
+ * when %IWL_NVM_SBANDS_FLAGS_LAR is enabled.
* @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
* when %IWL_NVM_SBANDS_FLAGS_LAR is enabled.
* @NVM_CHANNEL_RADAR: radar detection required
@@ -159,20 +162,21 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
*/
enum iwl_nvm_channel_flags {
- NVM_CHANNEL_VALID = BIT(0),
- NVM_CHANNEL_IBSS = BIT(1),
- NVM_CHANNEL_ACTIVE = BIT(3),
- NVM_CHANNEL_RADAR = BIT(4),
- NVM_CHANNEL_INDOOR_ONLY = BIT(5),
- NVM_CHANNEL_GO_CONCURRENT = BIT(6),
- NVM_CHANNEL_UNIFORM = BIT(7),
- NVM_CHANNEL_20MHZ = BIT(8),
- NVM_CHANNEL_40MHZ = BIT(9),
- NVM_CHANNEL_80MHZ = BIT(10),
- NVM_CHANNEL_160MHZ = BIT(11),
- NVM_CHANNEL_DC_HIGH = BIT(12),
- NVM_CHANNEL_VLP = BIT(13),
- NVM_CHANNEL_AFC = BIT(14),
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_INDOOR_ONLY = BIT(5),
+ NVM_CHANNEL_GO_CONCURRENT = BIT(6),
+ NVM_CHANNEL_UNIFORM = BIT(7),
+ NVM_CHANNEL_20MHZ = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+ NVM_CHANNEL_80MHZ = BIT(10),
+ NVM_CHANNEL_160MHZ = BIT(11),
+ NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
};
/**
@@ -332,7 +336,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
}
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
- u32 nvm_flags, const struct iwl_cfg *cfg)
+ u32 nvm_flags, const struct iwl_rf_cfg *cfg)
{
u32 flags = IEEE80211_CHAN_NO_HT40;
@@ -399,7 +403,7 @@ static int iwl_init_channel_map(struct iwl_trans *trans,
const void * const nvm_ch_flags,
u32 sbands_flags, bool v4)
{
- const struct iwl_cfg *cfg = trans->cfg;
+ const struct iwl_rf_cfg *cfg = trans->cfg;
struct device *dev = trans->dev;
int ch_idx;
int n_channels = 0;
@@ -500,7 +504,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
struct ieee80211_sta_vht_cap *vht_cap,
u8 tx_chains, u8 rx_chains)
{
- const struct iwl_cfg *cfg = trans->cfg;
+ const struct iwl_rf_cfg *cfg = trans->cfg;
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
@@ -513,7 +517,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
IEEE80211_VHT_MAX_AMPDU_1024K <<
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
- if (!trans->cfg->ht_params->stbc)
+ if (!trans->cfg->ht_params.stbc)
vht_cap->cap &= ~IEEE80211_VHT_CAP_RXSTBC_MASK;
if (data->vht160_supported)
@@ -523,7 +527,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
if (cfg->vht_mu_mimo_supported)
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
- if (cfg->ht_params->ldpc)
+ if (cfg->ht_params.ldpc)
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
if (data->sku_cap_mimo_disabled) {
@@ -531,21 +535,21 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
num_tx_ants = 1;
}
- if (trans->cfg->ht_params->stbc && num_tx_ants > 1)
+ if (trans->cfg->ht_params.stbc && num_tx_ants > 1)
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
else
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
case IWL_AMSDU_2K:
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
else
@@ -916,8 +920,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
{
bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO));
- bool slow_pcie = (!trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB);
+ bool slow_pcie = (!trans->mac_cfg->integrated &&
+ trans->info.pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB);
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -944,7 +948,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
break;
case NL80211_BAND_6GHZ:
- if (!trans->reduced_cap_sku) {
+ if (!trans->reduced_cap_sku &&
+ (!trans->cfg->bw_limit || trans->cfg->bw_limit >= 320)) {
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |=
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |=
@@ -1031,11 +1036,11 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap)
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
case IWL_CFG_RF_TYPE_FM:
case IWL_CFG_RF_TYPE_WH:
@@ -1047,7 +1052,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
break;
}
- if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iftype_data->eht_cap.has_eht) {
iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
@@ -1076,13 +1081,13 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BCAST_TWT;
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
!is_ap) {
iftype_data->vendor_elems.data = iwl_vendor_caps;
iftype_data->vendor_elems.len = ARRAY_SIZE(iwl_vendor_caps);
}
- if (!trans->cfg->ht_params->stbc) {
+ if (!trans->cfg->ht_params.stbc) {
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
~IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &=
@@ -1094,19 +1099,23 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
}
- if (trans->no_160)
+ if (trans->cfg->bw_limit && trans->cfg->bw_limit < 160)
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- if (trans->reduced_cap_sku) {
+ if ((trans->cfg->bw_limit && trans->cfg->bw_limit < 320) ||
+ trans->reduced_cap_sku) {
memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
+
+ if (trans->reduced_cap_sku) {
iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
- iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
}
}
@@ -1242,7 +1251,7 @@ static void iwl_init_sbands(struct iwl_trans *trans,
n_used, n_channels);
}
-static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+static int iwl_get_sku(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
if (cfg->nvm_type != IWL_NVM_EXT)
@@ -1251,7 +1260,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000));
}
-static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
+static int iwl_get_nvm_version(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw)
{
if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + NVM_VERSION);
@@ -1260,7 +1269,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
NVM_VERSION_EXT_NVM));
}
-static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+static int iwl_get_radio_cfg(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
if (cfg->nvm_type != IWL_NVM_EXT)
@@ -1270,7 +1279,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
}
-static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
+static int iwl_get_n_hw_addrs(const struct iwl_rf_cfg *cfg, const __le16 *nvm_sw)
{
int n_hw_addr;
@@ -1282,7 +1291,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
return n_hw_addr & N_HW_ADDR_MASK;
}
-static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
+static void iwl_set_radio_cfg(const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data,
u32 radio_cfg)
{
@@ -1341,7 +1350,7 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
}
static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
- const struct iwl_cfg *cfg,
+ const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *mac_override,
const __be16 *nvm_hw)
@@ -1390,11 +1399,12 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
}
static int iwl_set_hw_address(struct iwl_trans *trans,
- const struct iwl_cfg *cfg,
+ const struct iwl_rf_cfg *cfg,
struct iwl_nvm_data *data, const __be16 *nvm_hw,
const __le16 *mac_override)
{
- if (cfg->mac_addr_from_csr) {
+ const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
+ if (mac_cfg->base->mac_addr_from_csr) {
iwl_set_hw_address_from_csr(trans, data);
} else if (cfg->nvm_type != IWL_NVM_EXT) {
const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
@@ -1424,7 +1434,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
}
static bool
-iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const __be16 *nvm_hw)
{
/*
@@ -1436,7 +1446,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* in 5GHz otherwise the FW will throw a sysassert when we try
* to use them.
*/
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
/*
* Unlike the other sections in the NVM, the hw
* section uses big-endian.
@@ -1456,7 +1466,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
struct iwl_nvm_data *
-iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
const struct iwl_fw *fw, u8 tx_ant, u8 rx_ant)
{
@@ -1520,7 +1530,7 @@ iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data);
struct iwl_nvm_data *
-iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
@@ -1620,7 +1630,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
- const struct iwl_cfg *cfg)
+ const struct iwl_rf_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1655,6 +1665,10 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
flags |= NL80211_RRF_NO_OUTDOOR;
+ if (nvm_flags & NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY &&
+ flags & NL80211_RRF_NO_IR)
+ flags |= NL80211_RRF_ALLOW_20MHZ_ACTIVITY;
+
/* Set the GO concurrent flag only in case that NO_IR is set.
* Otherwise it is meaningless
*/
@@ -1731,10 +1745,12 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
}
struct ieee80211_regdomain *
-iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_nvm_mcc_info(struct iwl_trans *trans,
int num_of_ch, __le32 *channels, u16 fw_mcc,
u16 geo_info, u32 cap, u8 resp_ver)
{
+ const struct iwl_rf_cfg *cfg = trans->cfg;
+ struct device *dev = trans->dev;
int ch_idx;
u16 ch_flags;
u32 reg_rule_flags, prev_reg_rule_flags = 0;
@@ -1989,8 +2005,8 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
le32_to_cpu(dword_buff[3]));
/* nvm file validation, dword_buff[2] holds the file version */
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
- trans->hw_rev_step == SILICON_C_STEP &&
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+ trans->info.hw_rev_step == SILICON_C_STEP &&
le32_to_cpu(dword_buff[2]) < 0xE4A) {
ret = -EFAULT;
goto out;
@@ -2057,7 +2073,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
break;
}
- iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);
+ iwl_nvm_fixups(trans->info.hw_id, section_id, temp, section_size);
kfree(nvm_sections[section_id].data);
nvm_sections[section_id].data = temp;
@@ -2160,7 +2176,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
+ if (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
nvm->sku_cap_11be_enable = true;
/* Initialize PHY sku data */
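Two hunks above replace the old trans->no_160 boolean with the numeric trans->cfg->bw_limit, where 0 means no limit. A hedged sketch of the predicate both sites effectively share (helper name hypothetical):

/* true if the SKU allows channels of at least @bw MHz */
static bool bw_allowed_sketch(const struct iwl_trans *trans, int bw)
{
	return !trans->cfg->bw_limit || trans->cfg->bw_limit >= bw;
}

With it, the 160 MHz HE capability is cleared when !bw_allowed_sketch(trans, 160), and the 320 MHz EHT MCS set when !bw_allowed_sketch(trans, 320).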
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 0c6c3fb8c6dd..9ce9fa4e78fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2015, 2018-2025 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
@@ -30,7 +30,7 @@ enum iwl_nvm_sbands_flags {
* later with iwl_free_nvm_data().
*/
struct iwl_nvm_data *
-iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw,
const __be16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
@@ -46,9 +46,17 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* accordingly. An ERR_PTR is returned on error.
* If not given to the regulatory core, the user is responsible for freeing
* the regdomain returned here with kfree.
+ *
+ * @trans: the transport
+ * @num_of_ch: the number of channels
+ * @channels: channel map
+ * @fw_mcc: firmware country code
+ * @geo_info: geo info value
+ * @cap: capability
+ * @resp_ver: FW response version
*/
struct ieee80211_regdomain *
-iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_nvm_mcc_info(struct iwl_trans *trans,
int num_of_ch, __le32 *channels, u16 fw_mcc,
u16 geo_info, u32 cap, u8 resp_ver);
@@ -87,7 +95,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
* iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data
*/
struct iwl_nvm_data *
-iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
const struct iwl_fw *fw, u8 set_tx_ant, u8 set_rx_ant);
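Since iwl_parse_nvm_mcc_info() now derives dev and cfg from the transport internally, a caller passes only the trans plus the firmware's MCC payload. An illustrative call (variable names hypothetical):

struct ieee80211_regdomain *regd;

regd = iwl_parse_nvm_mcc_info(trans, num_of_ch, channels,
			      fw_mcc, geo_info, cap, resp_ver);
if (IS_ERR(regd))
	return PTR_ERR(regd);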
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
index b3c25acd3691..ec312c90ff85 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023, 2025 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#include <linux/types.h>
@@ -42,7 +42,7 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
enum nl80211_band band,
u8 tx_chains, u8 rx_chains)
{
- const struct iwl_cfg *cfg = trans->cfg;
+ const struct iwl_rf_cfg *cfg = trans->cfg;
int max_bit_rate = 0;
tx_chains = hweight8(tx_chains);
@@ -53,7 +53,8 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
if (!(data->sku_cap_11n_enable) ||
(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
- !cfg->ht_params) {
+ /* there are no devices with HT but without HT40 entirely */
+ !cfg->ht_params.ht40_bands) {
ht_info->ht_supported = false;
return;
}
@@ -64,17 +65,17 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
- if (cfg->ht_params->stbc) {
+ if (cfg->ht_params.stbc) {
ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
if (tx_chains > 1)
ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
}
- if (cfg->ht_params->ldpc)
+ if (cfg->ht_params.ldpc)
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
- if (trans->trans_cfg->mq_rx_supported ||
+ if (trans->mac_cfg->mq_rx_supported ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
@@ -90,13 +91,13 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
if (rx_chains >= 3)
ht_info->mcs.rx_mask[2] = 0xFF;
- if (cfg->ht_params->ht_greenfield_support)
+ if (cfg->ht_params.ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (cfg->ht_params->ht40_bands & BIT(band)) {
+ if (cfg->ht_params.ht40_bands & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
max_bit_rate = MAX_BIT_RATE_40_MHZ;
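Because ht_params is now embedded in the config rather than pointed to, "no HT support" can no longer be a NULL-pointer check; as the comment in the hunk notes, an empty ht40_bands mask stands in for it. In sketch form:

/* before: if (!cfg->ht_params) meant no HT at all */
/* after: an all-zero embedded struct signals the same thing */
if (!cfg->ht_params.ht40_bands)
	ht_info->ht_supported = false;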
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 34eca1a568ea..5dc299296d6d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2024-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -17,7 +17,7 @@ struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
-struct iwl_cfg;
+struct iwl_rf_cfg;
/**
* DOC: Operational mode - what is it ?
@@ -52,12 +52,20 @@ struct iwl_cfg;
* any debug collection must happen synchronously as
* the device will be shut down
* @IWL_ERR_TYPE_CMD_QUEUE_FULL: command queue was full
+ * @IWL_ERR_TYPE_TOP_RESET_BY_BT: TOP reset initiated by BT
+ * @IWL_ERR_TYPE_TOP_FATAL_ERROR: TOP fatal error
+ * @IWL_ERR_TYPE_TOP_RESET_FAILED: TOP reset failed
+ * @IWL_ERR_TYPE_DEBUGFS: error/reset indication from debugfs
*/
enum iwl_fw_error_type {
IWL_ERR_TYPE_IRQ,
IWL_ERR_TYPE_NMI_FORCED,
IWL_ERR_TYPE_RESET_HS_TIMEOUT,
IWL_ERR_TYPE_CMD_QUEUE_FULL,
+ IWL_ERR_TYPE_TOP_RESET_BY_BT,
+ IWL_ERR_TYPE_TOP_FATAL_ERROR,
+ IWL_ERR_TYPE_TOP_RESET_FAILED,
+ IWL_ERR_TYPE_DEBUGFS,
};
/**
@@ -142,7 +150,7 @@ struct iwl_fw_error_dump_mode {
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
- const struct iwl_cfg *cfg,
+ const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw,
struct dentry *dbgfs_dir);
void (*stop)(struct iwl_op_mode *op_mode);
@@ -242,6 +250,9 @@ static inline void iwl_op_mode_dump_error(struct iwl_op_mode *op_mode,
{
might_sleep();
+ if (WARN_ON(mode->type == IWL_ERR_TYPE_TOP_RESET_BY_BT))
+ return;
+
if (op_mode->ops->dump_error)
op_mode->ops->dump_error(op_mode, mode);
}
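An op-mode picks up the renamed start() signature through its ops table as usual; a minimal hypothetical registration (all names illustrative):

static const struct iwl_op_mode_ops example_ops = {
	.start = example_start,	/* now takes const struct iwl_rf_cfg * */
	.stop = example_stop,
	.dump_error = example_dump_error,
};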
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index e7b2e08645ef..8a40801cf0dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -15,7 +15,7 @@
#include <linux/dmapool.h>
#include "fw/api/commands.h"
#include "pcie/internal.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
struct iwl_trans_dev_restart_data {
struct list_head list;
@@ -82,14 +82,14 @@ void iwl_trans_free_restart_list(void)
struct iwl_trans_reprobe {
struct device *dev;
- struct work_struct work;
+ struct delayed_work work;
};
static void iwl_trans_reprobe_wk(struct work_struct *wk)
{
struct iwl_trans_reprobe *reprobe;
- reprobe = container_of(wk, typeof(*reprobe), work);
+ reprobe = container_of(wk, typeof(*reprobe), work.work);
if (device_reprobe(reprobe->dev))
dev_err(reprobe->dev, "reprobe failed!\n");
@@ -98,6 +98,31 @@ static void iwl_trans_reprobe_wk(struct work_struct *wk)
module_put(THIS_MODULE);
}
+static void iwl_trans_schedule_reprobe(struct iwl_trans *trans,
+ unsigned int delay_ms)
+{
+ struct iwl_trans_reprobe *reprobe;
+
+ /*
+ * get a module reference to avoid doing this while unloading
+ * anyway and to avoid scheduling a work with code that's
+ * being removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(trans, "Module is being unloaded - abort\n");
+ return;
+ }
+
+ reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
+ if (!reprobe) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ reprobe->dev = get_device(trans->dev);
+ INIT_DELAYED_WORK(&reprobe->work, iwl_trans_reprobe_wk);
+ schedule_delayed_work(&reprobe->work, msecs_to_jiffies(delay_ms));
+}
+
#define IWL_TRANS_RESET_OK_TIME 7 /* seconds */
static enum iwl_reset_mode
@@ -106,18 +131,46 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
struct iwl_trans_dev_restart_data *data;
enum iwl_reset_mode at_least = 0;
unsigned int index;
- static const enum iwl_reset_mode escalation_list[] = {
+ static const enum iwl_reset_mode escalation_list_old[] = {
+ IWL_RESET_MODE_SW_RESET,
+ IWL_RESET_MODE_REPROBE,
+ IWL_RESET_MODE_REPROBE,
+ IWL_RESET_MODE_FUNC_RESET,
+ IWL_RESET_MODE_PROD_RESET,
+ };
+ static const enum iwl_reset_mode escalation_list_sc[] = {
IWL_RESET_MODE_SW_RESET,
IWL_RESET_MODE_REPROBE,
IWL_RESET_MODE_REPROBE,
IWL_RESET_MODE_FUNC_RESET,
- /* FIXME: add TOP reset */
+ IWL_RESET_MODE_TOP_RESET,
IWL_RESET_MODE_PROD_RESET,
- /* FIXME: add TOP reset */
+ IWL_RESET_MODE_TOP_RESET,
IWL_RESET_MODE_PROD_RESET,
- /* FIXME: add TOP reset */
+ IWL_RESET_MODE_TOP_RESET,
IWL_RESET_MODE_PROD_RESET,
};
+ const enum iwl_reset_mode *escalation_list;
+ size_t escalation_list_size;
+
+ /* used by TOP fatal error/TOP reset */
+ if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_FAILED)
+ return IWL_RESET_MODE_PROD_RESET;
+
+ if (trans->request_top_reset) {
+ trans->request_top_reset = 0;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC)
+ return IWL_RESET_MODE_TOP_RESET;
+ return IWL_RESET_MODE_PROD_RESET;
+ }
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
+ escalation_list = escalation_list_sc;
+ escalation_list_size = ARRAY_SIZE(escalation_list_sc);
+ } else {
+ escalation_list = escalation_list_old;
+ escalation_list_size = ARRAY_SIZE(escalation_list_old);
+ }
if (trans->restart.during_reset)
at_least = IWL_RESET_MODE_REPROBE;
@@ -132,8 +185,8 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
data->restart_count = 0;
index = data->restart_count;
- if (index >= ARRAY_SIZE(escalation_list)) {
- index = ARRAY_SIZE(escalation_list) - 1;
+ if (index >= escalation_list_size) {
+ index = escalation_list_size - 1;
if (!data->backoff) {
data->backoff = true;
return IWL_RESET_MODE_BACKOFF;
@@ -144,15 +197,21 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
return max(at_least, escalation_list[index]);
}
+#define IWL_TRANS_TOP_FOLLOWER_WAIT 180 /* ms */
+
#define IWL_TRANS_RESET_DELAY (HZ * 60)
static void iwl_trans_restart_wk(struct work_struct *wk)
{
struct iwl_trans *trans = container_of(wk, typeof(*trans),
restart.wk.work);
- struct iwl_trans_reprobe *reprobe;
enum iwl_reset_mode mode;
+ if (trans->restart.mode.type == IWL_ERR_TYPE_TOP_RESET_BY_BT) {
+ iwl_trans_schedule_reprobe(trans, IWL_TRANS_TOP_FOLLOWER_WAIT);
+ return;
+ }
+
if (!trans->op_mode)
return;
@@ -187,31 +246,19 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
iwl_trans_inc_restart_count(trans->dev);
switch (mode) {
+ case IWL_RESET_MODE_TOP_RESET:
+ trans->do_top_reset = 1;
+ IWL_ERR(trans, "Device error - TOP reset\n");
+ fallthrough;
case IWL_RESET_MODE_SW_RESET:
- IWL_ERR(trans, "Device error - SW reset\n");
+ if (mode == IWL_RESET_MODE_SW_RESET)
+ IWL_ERR(trans, "Device error - SW reset\n");
iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
break;
case IWL_RESET_MODE_REPROBE:
IWL_ERR(trans, "Device error - reprobe!\n");
- /*
- * get a module reference to avoid doing this while unloading
- * anyway and to avoid scheduling a work with code that's
- * being removed.
- */
- if (!try_module_get(THIS_MODULE)) {
- IWL_ERR(trans, "Module is being unloaded - abort\n");
- return;
- }
-
- reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
- if (!reprobe) {
- module_put(THIS_MODULE);
- return;
- }
- reprobe->dev = get_device(trans->dev);
- INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk);
- schedule_work(&reprobe->work);
+ iwl_trans_schedule_reprobe(trans, 0);
break;
default:
iwl_trans_pcie_reset(trans, mode);
@@ -221,7 +268,7 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_cfg_trans_params *cfg_trans)
+ const struct iwl_mac_cfg *mac_cfg)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -232,7 +279,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
if (!trans)
return NULL;
- trans->trans_cfg = cfg_trans;
+ trans->mac_cfg = mac_cfg;
#ifdef CONFIG_LOCKDEP
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
@@ -240,7 +287,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
#endif
trans->dev = dev;
- trans->num_rx_queues = 1;
INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);
@@ -251,14 +297,18 @@ int iwl_trans_init(struct iwl_trans *trans)
{
int txcmd_size, txcmd_align;
- if (!trans->trans_cfg->gen2) {
- txcmd_size = sizeof(struct iwl_tx_cmd);
+ /* check if name/num_rxqs were set as a proxy for info being set */
+ if (WARN_ON(!trans->info.name || !trans->info.num_rxqs))
+ return -EINVAL;
+
+ if (!trans->mac_cfg->gen2) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v6);
txcmd_align = sizeof(void *);
- } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
- txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ } else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v9);
txcmd_align = 64;
} else {
- txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_size = sizeof(struct iwl_tx_cmd);
txcmd_align = 128;
}
@@ -266,7 +316,7 @@ int iwl_trans_init(struct iwl_trans *trans)
txcmd_size += 36; /* biggest possible 802.11 header */
/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
- if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
+ if (WARN_ON(trans->mac_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
@@ -278,9 +328,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (!trans->dev_cmd_pool)
return -ENOMEM;
- /* Initialize the wait queue for commands */
- init_waitqueue_head(&trans->wait_command_queue);
-
return 0;
}
@@ -298,17 +345,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
return -ERFKILL;
- /*
- * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
- * bit is set early in the D3 flow, before we send all the commands
- * that configure the firmware for D3 operation (power, patterns, ...)
- * and we don't want to flag all those with CMD_SEND_IN_D3.
- * So use the system_pm_mode instead. The only command sent after
- * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
- * CMD_SEND_IN_D3.
- */
- if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
- !(cmd->flags & CMD_SEND_IN_D3)))
+ if (unlikely(test_bit(STATUS_SUSPENDED, &trans->status)))
return -EHOSTDOWN;
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
@@ -321,7 +358,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
- if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
+ if (trans->conf.wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
if (cmd->id != REPLY_ERROR)
cmd->id = DEF_ID(cmd->id);
}
@@ -365,11 +402,12 @@ const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
grp = iwl_cmd_groupid(id);
cmd = iwl_cmd_opcode(id);
- if (!trans->command_groups || grp >= trans->command_groups_size ||
- !trans->command_groups[grp].arr)
+ if (!trans->conf.command_groups ||
+ grp >= trans->conf.command_groups_size ||
+ !trans->conf.command_groups[grp].arr)
return "UNKNOWN";
- arr = &trans->command_groups[grp];
+ arr = &trans->conf.command_groups[grp];
ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
if (!ret)
return "UNKNOWN";
@@ -377,37 +415,29 @@ const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);
-int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
+void iwl_trans_op_mode_enter(struct iwl_trans *trans,
+ struct iwl_op_mode *op_mode)
{
- int i, j;
- const struct iwl_hcmd_arr *arr;
+ trans->op_mode = op_mode;
- for (i = 0; i < trans->command_groups_size; i++) {
- arr = &trans->command_groups[i];
- if (!arr->arr)
- continue;
- for (j = 0; j < arr->size - 1; j++)
- if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
- return -1;
- }
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
+ if (WARN_ON(trans->conf.n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+ trans->conf.n_no_reclaim_cmds =
+ ARRAY_SIZE(trans->conf.no_reclaim_cmds);
-void iwl_trans_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
-{
- trans->op_mode = trans_cfg->op_mode;
+ WARN_ON_ONCE(!trans->conf.rx_mpdu_cmd);
- iwl_trans_pcie_configure(trans, trans_cfg);
- WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
+ iwl_trans_pcie_op_mode_enter(trans);
}
-IWL_EXPORT_SYMBOL(iwl_trans_configure);
+IWL_EXPORT_SYMBOL(iwl_trans_op_mode_enter);
int iwl_trans_start_hw(struct iwl_trans *trans)
{
might_sleep();
+ clear_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status);
+ /* opmode may not resume if it detects errors */
+ clear_bit(STATUS_SUSPENDED, &trans->status);
+
return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);
@@ -421,6 +451,7 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans)
cancel_delayed_work_sync(&trans->restart.wk);
trans->op_mode = NULL;
+ memset(&trans->conf, 0, sizeof(trans->conf));
trans->state = IWL_TRANS_NO_FW;
}
@@ -497,18 +528,31 @@ IWL_EXPORT_SYMBOL(iwl_trans_dump_data);
int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
+ int err;
+
might_sleep();
- return iwl_trans_pcie_d3_suspend(trans, test, reset);
+ err = iwl_trans_pcie_d3_suspend(trans, test, reset);
+
+ if (!err)
+ set_bit(STATUS_SUSPENDED, &trans->status);
+
+ return err;
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
bool test, bool reset)
{
+ int err;
+
might_sleep();
- return iwl_trans_pcie_d3_resume(trans, status, test, reset);
+ err = iwl_trans_pcie_d3_resume(trans, status, test, reset);
+
+ clear_bit(STATUS_SUSPENDED, &trans->status);
+
+ return err;
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
@@ -558,34 +602,39 @@ iwl_trans_release_nic_access(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
-void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_fw_alive(struct iwl_trans *trans)
{
might_sleep();
trans->state = IWL_TRANS_FW_ALIVE;
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_trans_pcie_gen2_fw_alive(trans);
else
- iwl_trans_pcie_fw_alive(trans, scd_addr);
+ iwl_trans_pcie_fw_alive(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);
-int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
- bool run_in_rfkill)
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
+ enum iwl_ucode_type ucode_type, bool run_in_rfkill)
{
+ const struct fw_img *img;
int ret;
might_sleep();
- WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+ img = iwl_get_ucode_image(fw, ucode_type);
+ if (!img)
+ return -EINVAL;
clear_bit(STATUS_FW_ERROR, &trans->status);
- if (trans->trans_cfg->gen2)
- ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
+ if (trans->mac_cfg->gen2)
+ ret = iwl_trans_pcie_gen2_start_fw(trans, fw, img,
+ run_in_rfkill);
else
- ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);
+ ret = iwl_trans_pcie_start_fw(trans, fw, img,
+ run_in_rfkill);
if (ret == 0)
trans->state = IWL_TRANS_FW_STARTED;
@@ -626,7 +675,7 @@ void iwl_trans_stop_device(struct iwl_trans *trans)
iwl_op_mode_dump_error(trans->op_mode, &mode);
}
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_trans_pcie_gen2_stop_device(trans);
else
iwl_trans_pcie_stop_device(trans);
@@ -645,7 +694,7 @@ int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
"bad state = %d\n", trans->state))
return -EIO;
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);
return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
@@ -778,14 +827,14 @@ int iwl_trans_load_pnvm(struct iwl_trans *trans,
const struct iwl_pnvm_image *pnvm_data,
const struct iwl_ucode_capabilities *capa)
{
- return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
+ return iwl_trans_pcie_ctx_info_v2_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
void iwl_trans_set_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
- iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
+ iwl_trans_pcie_ctx_info_v2_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
@@ -793,7 +842,7 @@ int iwl_trans_load_reduce_power(struct iwl_trans *trans,
const struct iwl_pnvm_image *payloads,
const struct iwl_ucode_capabilities *capa)
{
- return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
+ return iwl_trans_pcie_ctx_info_v2_load_reduce_power(trans, payloads,
capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
@@ -801,6 +850,6 @@ IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
void iwl_trans_set_reduce_power(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
- iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
+ iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);
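Under the new iwl_trans_start_fw() signature the op-mode hands over the parsed firmware plus a ucode type and lets the transport resolve the image via iwl_get_ucode_image(). An illustrative call site (surrounding op-mode code hypothetical):

int ret;

/* run_in_rfkill = false: do not start while rfkill is asserted */
ret = iwl_trans_start_fw(trans, fw, IWL_UCODE_REGULAR, false);
if (ret)
	return ret;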
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index ce4954b0d524..012b1e44bce3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -109,16 +109,12 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* the response. The caller needs to call iwl_free_resp when done.
* @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
* @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
- * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
- * SUSPEND and RESUME commands. We are in D3 mode when we set
- * trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
CMD_BLOCK_TXQS = BIT(3),
- CMD_SEND_IN_D3 = BIT(4),
};
#define CMD_MODE_BITS 5
@@ -304,6 +300,10 @@ enum iwl_d3_status {
* via iwl_trans_finish_sw_reset()
* @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
* the firmware state yet
+ * @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't
+ * attempt another reset yet
+ * @STATUS_SUSPENDED: device is suspended; commands are rejected
+ * until the resume path clears this bit
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -317,6 +317,8 @@ enum iwl_trans_status {
STATUS_SUPPRESS_CMD_ERROR_ONCE,
STATUS_IN_SW_RESET,
STATUS_RESET_PENDING,
+ STATUS_TRANS_RESET_IN_PROGRESS,
+ STATUS_SUSPENDED,
};
static inline int
@@ -328,6 +330,7 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
+ return get_order(8 * 1024);
case IWL_AMSDU_12K:
return get_order(16 * 1024);
default:
@@ -387,7 +390,8 @@ struct iwl_dump_sanitize_ops {
/**
* struct iwl_trans_config - transport configuration
*
- * @op_mode: pointer to the upper layer.
+ * These values should be set before iwl_trans_op_mode_enter().
+ *
* @cmd_queue: the index of the command queue.
* Must be set before start_fw.
* @cmd_fifo: the fifo for host commands
@@ -398,8 +402,6 @@ struct iwl_dump_sanitize_ops {
* @n_no_reclaim_cmds: # of commands in list
* @rx_buf_size: RX buffer size needed for A-MSDUs
* if unset 4k will be the RX buffer size
- * @bc_table_dword: set to true if the BC table expects the byte count to be
- * in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @command_groups: array of command groups, each member is an array of the
* commands in the group; for debugging only
@@ -410,17 +412,24 @@ struct iwl_dump_sanitize_ops {
* @queue_alloc_cmd_ver: queue allocation command version, set to 0
* for using the older SCD_QUEUE_CFG, set to the version of
* SCD_QUEUE_CONFIG_CMD otherwise.
+ * @wide_cmd_header: true when ucode supports wide command header format
+ * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
+ * starting the firmware, used for tracing
+ * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
+ * start of the 802.11 header in the @rx_mpdu_cmd
+ * @dsbr_urm_fw_dependent: switch to URM based on fw settings
+ * @dsbr_urm_permanent: switch to URM permanently
+ * @mbx_addr_0_step: step address data 0
+ * @mbx_addr_1_step: step address data 1
+ * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used
*/
struct iwl_trans_config {
- struct iwl_op_mode *op_mode;
-
u8 cmd_queue;
u8 cmd_fifo;
- const u8 *no_reclaim_cmds;
- unsigned int n_no_reclaim_cmds;
+ u8 n_no_reclaim_cmds;
+ u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
enum iwl_amsdu_size rx_buf_size;
- bool bc_table_dword;
bool scd_set_active;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
@@ -428,6 +437,16 @@ struct iwl_trans_config {
u8 cb_data_offs;
bool fw_reset_handshake;
u8 queue_alloc_cmd_ver;
+
+ bool wide_cmd_header;
+ u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+
+ u8 dsbr_urm_fw_dependent:1,
+ dsbr_urm_permanent:1,
+ ext_32khz_clock_valid:1;
+
+ u32 mbx_addr_0_step;
+ u32 mbx_addr_1_step;
};
struct iwl_trans_dump_data {
@@ -513,23 +532,6 @@ enum iwl_trans_state {
*/
/**
- * enum iwl_plat_pm_mode - platform power management mode
- *
- * This enumeration describes the device's platform power management
- * behavior when in system-wide suspend (i.e WoWLAN).
- *
- * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
- * device. In system-wide suspend mode, it means that the all
- * connections will be closed automatically by mac80211 before
- * the platform is suspended.
- * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
- */
-enum iwl_plat_pm_mode {
- IWL_PLAT_PM_MODE_DISABLED,
- IWL_PLAT_PM_MODE_D3,
-};
-
-/**
* enum iwl_ini_cfg_state
* @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
* @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
@@ -819,106 +821,90 @@ struct iwl_txq {
};
/**
+ * struct iwl_trans_info - transport info for outside use
+ * @name: the device name
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
+ * @hw_rev: the revision data of the HW
+ * @hw_rev_step: The mac step of the HW
+ * @hw_rf_id: the device RF ID
+ * @hw_cnv_id: the device CNV ID
+ * @hw_crf_id: the device CRF ID
+ * @hw_wfpm_id: the device wfpm ID
+ * @hw_id: the ID of the device / sub-device
+ * Bits 0:15 represent the sub-device ID
+ * Bits 16:31 represent the device ID.
+ * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
+ * only valid for discrete (not integrated) NICs
+ * @num_rxqs: number of RX queues allocated by the transport
+ */
+struct iwl_trans_info {
+ const char *name;
+ u32 max_skb_frags;
+ u32 hw_rev;
+ u32 hw_rev_step;
+ u32 hw_rf_id;
+ u32 hw_crf_id;
+ u32 hw_cnv_id;
+ u32 hw_wfpm_id;
+ u32 hw_id;
+ u8 pcie_link_speed;
+ u8 num_rxqs;
+};
+
+/**
* struct iwl_trans - transport common data
*
* @csme_own: true if we couldn't get ownership on the device
* @op_mode: pointer to the op_mode
- * @trans_cfg: the trans-specific configuration part
+ * @mac_cfg: the trans-specific configuration part
* @cfg: pointer to the configuration
* @drv: pointer to iwl_drv
+ * @conf: configuration set by the opmode before iwl_trans_op_mode_enter()
* @state: current device state
* @status: a bit-mask of transport status flags
* @dev: pointer to struct device * that represents the device
- * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
- * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
- * @hw_rf_id: a u32 with the device RF ID
- * @hw_cnv_id: a u32 with the device CNV ID
- * @hw_crf_id: a u32 with the device CRF ID
- * @hw_wfpm_id: a u32 with the device wfpm ID
- * @hw_id: a u32 with the ID of the device / sub-device.
- * Set during transport allocation.
- * @hw_id_str: a string with info about HW ID. Set during transport allocation.
- * @sku_id: the SKU identifier (for PNVM matching)
+ * @info: device information for use by other layers
* @pnvm_loaded: indicates PNVM was loaded
- * @hw_rev: the revision data of the HW
- * @hw_rev_step: The mac step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
* @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
- * @command_groups: pointer to command group name list array
- * @command_groups_size: array size of @command_groups
- * @wide_cmd_header: true when ucode supports wide command header format
- * @wait_command_queue: wait queue for sync commands
- * @num_rx_queues: number of RX queues allocated by the transport;
- * the transport must set this before calling iwl_drv_start()
- * @iml_len: the length of the image loader
- * @iml: a pointer to the image loader itself
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
* @dev_cmd_pool_name: name for the TX command allocation pool
* @dbgfs_dir: iwlwifi debugfs base dir for this device
* @sync_cmd_lockdep_map: lockdep map for checking sync commands
- * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
- * starting the firmware, used for tracing
- * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
- * start of the 802.11 header in the @rx_mpdu_cmd
* @dbg: additional debug data, see &struct iwl_trans_debug
* @init_dram: FW initialization DMA data
- * @system_pm_mode: the system-wide power management mode in use.
- * This mode is set dynamically, depending on the WoWLAN values
- * configured from the userspace at runtime.
- * @name: the device name
- * @mbx_addr_0_step: step address data 0
- * @mbx_addr_1_step: step address data 1
- * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
- * only valid for discrete (not integrated) NICs
- * @invalid_tx_cmd: invalid TX command buffer
* @reduced_cap_sku: reduced capability supported SKU
- * @no_160: device not supporting 160 MHz
* @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
* @restart: restart worker data
* @restart.wk: restart worker
* @restart.mode: reset/restart error mode information
* @restart.during_reset: error occurred during previous software reset
- * @me_recheck_wk: worker to recheck WiAMT/CSME presence
- * @me_present: WiAMT/CSME is detected as present (1), not present (0)
- * or unknown (-1, so can still use it as a boolean safely)
* @trans_specific: data for the specific transport this is allocated for/with
- * @dsbr_urm_fw_dependent: switch to URM based on fw settings
- * @dsbr_urm_permanent: switch to URM permanently
- * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used
+ * @request_top_reset: TOP reset was requested; the reset worker
+ * should be scheduled (with the appropriate reason)
+ * @do_top_reset: indication to the (PCIe) transport/context-info
+ * to do the TOP reset
*/
struct iwl_trans {
bool csme_own;
struct iwl_op_mode *op_mode;
- const struct iwl_cfg_trans_params *trans_cfg;
- const struct iwl_cfg *cfg;
+ const struct iwl_mac_cfg *mac_cfg;
+ const struct iwl_rf_cfg *cfg;
struct iwl_drv *drv;
+ struct iwl_trans_config conf;
enum iwl_trans_state state;
unsigned long status;
struct device *dev;
- u32 max_skb_frags;
- u32 hw_rev;
- u32 hw_rev_step;
- u32 hw_rf_id;
- u32 hw_crf_id;
- u32 hw_cnv_id;
- u32 hw_wfpm_id;
- u32 hw_id;
- char hw_id_str[52];
- u32 sku_id[3];
- bool reduced_cap_sku;
- u8 no_160:1, step_urm:1;
-
- u8 dsbr_urm_fw_dependent:1,
- dsbr_urm_permanent:1;
- bool ext_32khz_clock_valid;
-
- u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+ const struct iwl_trans_info info;
+ bool reduced_cap_sku;
+ bool step_urm;
bool pm_support;
bool ltr_enabled;
@@ -927,16 +913,6 @@ struct iwl_trans {
u8 reduce_power_loaded:1;
u8 failed_to_load_reduce_power_image:1;
- const struct iwl_hcmd_arr *command_groups;
- int command_groups_size;
- bool wide_cmd_header;
-
- wait_queue_head_t wait_command_queue;
- u8 num_rx_queues;
-
- size_t iml_len;
- u8 *iml;
-
/* The following fields are internal only */
struct kmem_cache *dev_cmd_pool;
char dev_cmd_pool_name[50];
@@ -950,24 +926,14 @@ struct iwl_trans {
struct iwl_trans_debug dbg;
struct iwl_self_init_dram init_dram;
- enum iwl_plat_pm_mode system_pm_mode;
-
- const char *name;
- u32 mbx_addr_0_step;
- u32 mbx_addr_1_step;
-
- u8 pcie_link_speed;
-
- struct iwl_dma_ptr invalid_tx_cmd;
-
struct {
struct delayed_work wk;
struct iwl_fw_error_dump_mode mode;
bool during_reset;
} restart;
- struct delayed_work me_recheck_wk;
- s8 me_present;
+ u8 request_top_reset:1,
+ do_top_reset:1;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -975,19 +941,18 @@ struct iwl_trans {
};
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
-int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
-void iwl_trans_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg);
+void iwl_trans_op_mode_enter(struct iwl_trans *trans,
+ struct iwl_op_mode *op_mode);
int iwl_trans_start_hw(struct iwl_trans *trans);
void iwl_trans_op_mode_leave(struct iwl_trans *trans);
-void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_trans_fw_alive(struct iwl_trans *trans);
-int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
- bool run_in_rfkill);
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
+ enum iwl_ucode_type ucode_type, bool run_in_rfkill);
void iwl_trans_stop_device(struct iwl_trans *trans);
@@ -1150,6 +1115,9 @@ static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
{
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return;
+ /* set here, cleared only on device init (not on unbind/reprobe) */
+ if (test_and_set_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status))
+ return;
trans->restart.mode.type = type;
trans->restart.mode.context = IWL_ERR_CONTEXT_WORKER;
@@ -1187,6 +1155,9 @@ static inline void iwl_trans_opmode_sw_reset(struct iwl_trans *trans,
set_bit(STATUS_IN_SW_RESET, &trans->status);
+ if (WARN_ON(type == IWL_ERR_TYPE_TOP_RESET_BY_BT))
+ return;
+
if (!trans->op_mode->ops->sw_reset ||
!trans->op_mode->ops->sw_reset(trans->op_mode, type))
clear_bit(STATUS_IN_SW_RESET, &trans->status);
@@ -1234,7 +1205,7 @@ static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_cfg_trans_params *cfg_trans);
+ const struct iwl_mac_cfg *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);
@@ -1245,6 +1216,19 @@ static inline bool iwl_trans_is_hw_error_value(u32 val)
void iwl_trans_free_restart_list(void);
+static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans)
+{
+ u16 result = trans->cfg->num_rbds;
+
+ /*
+ * Since the AX210 family (So/Ty) the device cannot put multiple
+ * frames into the same buffer, so double the value for them.
+ */
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 2 * result;
+ return result;
+}
+
/*****************************************************
* PCIe handling
*****************************************************/
@@ -1256,6 +1240,8 @@ enum iwl_reset_mode {
/* upper level modes: */
IWL_RESET_MODE_SW_RESET,
IWL_RESET_MODE_REPROBE,
+ /* TOP reset doesn't require PCIe remove */
+ IWL_RESET_MODE_TOP_RESET,
/* PCIE level modes: */
IWL_RESET_MODE_REMOVE_ONLY,
IWL_RESET_MODE_RESCAN,
@@ -1272,4 +1258,19 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
+/* Internal helper */
+static inline void iwl_trans_set_info(struct iwl_trans *trans,
+ struct iwl_trans_info *info)
+{
+ struct iwl_trans_info *write;
+
+ write = (void *)(uintptr_t)&trans->info;
+ *write = *info;
+}
+
+static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
+{
+ return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
+}
+
#endif /* __iwl_trans_h__ */
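
For readers tracking the new const-info API: a minimal sketch of the write-once pattern iwl_trans_set_info() uses, with invented struct and field names standing in for the full driver definitions. The info block is const-qualified so ordinary code cannot modify it, and the one initializer casts the const away exactly once during device init, as the helper above does (this intentionally steps outside strict-C guarantees, trading them for read-only enforcement everywhere else):

#include <stdint.h>

struct trans_info {
	uint32_t hw_id;
	uint8_t num_rxqs;
};

struct trans {
	const struct trans_info info;	/* read-only once initialized */
};

/* the single writer: cast the const away exactly once, during init */
static void trans_set_info(struct trans *trans, const struct trans_info *info)
{
	struct trans_info *write = (void *)(uintptr_t)&trans->info;

	*write = *info;
}

int main(void)
{
	struct trans t = { .info = { .hw_id = 0x1234, .num_rxqs = 4 } };
	struct trans_info update = { .hw_id = 0x5678, .num_rxqs = 8 };

	trans_set_info(&t, &update);
	return t.info.num_rxqs == 8 ? 0 : 1;
}
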
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
index b14ec98e28b6..c5b49851e4b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
@@ -4,7 +4,6 @@
*/
#include <net/gso.h>
#include <linux/ieee80211.h>
-#include <net/gso.h>
#include <net/ip.h>
#include "iwl-drv.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
index 687a9450ac98..bda488ae9eec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/agg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
@@ -124,10 +124,12 @@ void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
rcu_read_lock();
baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
- if (IWL_FW_CHECK(mld, !baid_data,
- "Got valid BAID %d but not allocated, invalid BAR release!\n",
- baid))
+ if (!baid_data) {
+ IWL_DEBUG_HT(mld,
+ "Got valid BAID %d but not allocated\n",
+ baid);
goto out_unlock;
+ }
if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
sta_id > mld->fw->ucode_capa.num_stations ||
@@ -444,7 +446,7 @@ static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
struct iwl_mld_baid_data *data,
u16 ssn)
{
- for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+ for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
struct iwl_mld_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
struct iwl_mld_reorder_buf_entry *entries =
@@ -468,7 +470,7 @@ static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
&delba_data, sizeof(delba_data));
- for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+ for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
struct iwl_mld_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
struct iwl_mld_reorder_buf_entry *entries =
@@ -530,7 +532,7 @@ int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
* before starting the BA session in the firmware
*/
baid_data = kzalloc(sizeof(*baid_data) +
- mld->trans->num_rx_queues * reorder_buf_size,
+ mld->trans->info.num_rxqs * reorder_buf_size,
GFP_KERNEL);
if (!baid_data)
return -ENOMEM;
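
The BAID allocation above sizes one reorder buffer per RX queue in a single allocation. A hedged, simplified sketch of that trailing flexible-array pattern (names invented for illustration, not the driver's real layout):

#include <stdlib.h>

struct reorder_buffer {
	int head_sn;
};

struct baid_data {
	int tid;
	/* one reorder buffer per RX queue, stored right after the struct */
	struct reorder_buffer reorder_buf[];
};

/* returns NULL on allocation failure, like the kzalloc() call above */
static struct baid_data *baid_alloc(int num_rxqs)
{
	return calloc(1, sizeof(struct baid_data) +
			 num_rxqs * sizeof(struct reorder_buffer));
}
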
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.c b/drivers/net/wireless/intel/iwlwifi/mld/ap.c
index 571eabd0b511..26511b49d89a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/ap.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.c
@@ -11,6 +11,7 @@
#include "tx.h"
#include "power.h"
#include "key.h"
+#include "phy.h"
#include "iwl-utils.h"
#include "fw/api/sta.h"
@@ -269,6 +270,7 @@ int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *ctx;
int ret;
if (vif->type == NL80211_IFTYPE_AP)
@@ -314,6 +316,13 @@ int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
return iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
FW_CTXT_ACTION_MODIFY);
+ /* When the channel context was added, the link is not yet active, so
+ * min_def is always used. Update the PHY again here in case def should
+ * actually be used.
+ */
+ ctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+ iwl_mld_update_phy_chandef(mld, ctx);
+
return 0;
rm_bcast:
iwl_mld_remove_bcast_sta(mld, vif, link);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.c b/drivers/net/wireless/intel/iwlwifi/mld/coex.c
index 5f262bd43f21..32c727b3b391 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.c
@@ -24,17 +24,13 @@ int iwl_mld_send_bt_init_conf(struct iwl_mld *mld)
void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt)
{
- const struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+ const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
const struct iwl_bt_coex_profile_notif zero_notif = {};
/* zeroed structure means that BT is OFF */
bool bt_is_active = memcmp(notif, &zero_notif, sizeof(*notif));
- if (bt_is_active == mld->bt_is_active)
- return;
-
+ mld->last_bt_notif = *notif;
IWL_DEBUG_INFO(mld, "BT was turned %s\n", bt_is_active ? "ON" : "OFF");
- mld->bt_is_active = bt_is_active;
-
iwl_mld_emlsr_check_bt(mld);
}
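
The handler above now keeps the raw notification and treats an all-zero payload as "BT off". A minimal sketch of that idiom, assuming a simplified payload struct:

#include <stdbool.h>
#include <string.h>

struct bt_notif {
	unsigned char payload[32];
};

/* firmware reports "BT off" by sending an all-zero notification */
static bool bt_active(const struct bt_notif *notif)
{
	static const struct bt_notif zero;

	return memcmp(notif, &zero, sizeof(*notif)) != 0;
}
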
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index ee99298eebf5..339b148d6793 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -774,7 +774,7 @@ iwl_mld_update_ptk_rx_seq(struct iwl_mld *mld,
return;
for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
- for (int i = 1; i < mld->trans->num_rx_queues; i++)
+ for (int i = 1; i < mld->trans->info.num_rxqs; i++)
memcpy(mld_ptk_pn->q[i].pn[tid],
wowlan_status->ptk.aes_seq[tid].ccmp.pn,
IEEE80211_CCMP_PN_LEN);
@@ -1347,7 +1347,8 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
if (ret) {
IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret);
} else {
- mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+		/* Async notification handlers might send hcmds, which is not allowed in suspend */
+ iwl_mld_cancel_async_notifications(mld);
mld->fw_status.in_d3 = true;
}
@@ -1372,7 +1373,6 @@ int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan resume flow\n");
- mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
mld->fw_status.in_d3 = false;
iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
@@ -1437,7 +1437,7 @@ iwl_mld_suspend_set_ucast_pn(struct iwl_mld *mld, struct ieee80211_sta *sta,
ieee80211_get_key_rx_seq(key, tid, &seq);
/* and use the internal data for all queues */
- for (int que = 1; que < mld->trans->num_rx_queues; que++) {
+ for (int que = 1; que < mld->trans->info.num_rxqs; que++) {
u8 *cur_pn = mld_ptk_pn->q[que].pn[tid];
if (memcmp(max_pn, cur_pn, IEEE80211_CCMP_PN_LEN) < 0)
@@ -1903,7 +1903,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
IWL_DEBUG_WOWLAN(mld, "Starting the wowlan resume flow\n");
- mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
if (!mld->fw_status.in_d3) {
IWL_DEBUG_WOWLAN(mld,
"Device_powered_off() was called during wowlan\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index 93f9f78e4276..352da8aa7898 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -546,6 +546,9 @@ iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
#endif
MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200);
+ debugfs_create_bool("rx_ts_ptp", 0600, debugfs_dir,
+ &mld->monitor.ptp_time);
+
/* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits which removes the target.)
*/
@@ -997,8 +1000,8 @@ void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
mld_link_dir = debugfs_create_dir("iwlmld", dir);
}
-static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
- size_t count, void *data)
+static ssize_t _iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data, bool v3)
{
struct ieee80211_link_sta *link_sta = data;
struct iwl_mld_link_sta *mld_link_sta;
@@ -1020,6 +1023,10 @@ static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
return -EIO;
+	/* input is v2 or v3: normalize to v3, then convert to the FW's format */
+ rate = iwl_v3_rate_from_v2_v3(cpu_to_le32(rate), v3);
+ rate = le32_to_cpu(iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3));
+
ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id,
partial ? IWL_TLC_DEBUG_PARTIAL_FIXED_RATE :
IWL_TLC_DEBUG_FIXED_RATE,
@@ -1033,6 +1040,18 @@ static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
return ret ? : count;
}
+static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ return _iwl_dbgfs_fixed_rate_write(mld, buf, count, data, false);
+}
+
+static ssize_t iwl_dbgfs_fixed_rate_v3_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ return _iwl_dbgfs_fixed_rate_write(mld, buf, count, data, true);
+}
+
static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf,
size_t count, void *data)
{
@@ -1072,6 +1091,7 @@ static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf,
LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(tlc_dhc, 64);
LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate, 64);
+LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate_v3, 64);
void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1079,5 +1099,6 @@ void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
struct dentry *dir)
{
LINK_STA_DEBUGFS_ADD_FILE(fixed_rate, dir, 0200);
+ LINK_STA_DEBUGFS_ADD_FILE(fixed_rate_v3, dir, 0200);
LINK_STA_DEBUGFS_ADD_FILE(tlc_dhc, dir, 0200);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
index 4b083d447ee2..73ed8d5cab43 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
@@ -8,10 +8,10 @@
#include "fw/api/alive.h"
#include "fw/api/scan.h"
#include "fw/api/rx.h"
+#include "phy.h"
#include "fw/dbg.h"
#include "fw/pnvm.h"
#include "hcmd.h"
-#include "iwl-nvm-parse.h"
#include "power.h"
#include "mcc.h"
#include "led.h"
@@ -49,7 +49,7 @@ static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
/* Do not direct RSS traffic to Q 0 which is our fallback queue */
for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
cmd.indirection_table[i] =
- 1 + (i % (mld->trans->num_rx_queues - 1));
+ 1 + (i % (mld->trans->info.num_rxqs - 1));
netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
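
The indirection-table fill above deliberately never maps RSS traffic to queue 0, which the driver keeps as its fallback queue. A small standalone sketch of the spread (illustrative types; num_rxqs must be at least 2):

/* spread entries round-robin over queues 1..num_rxqs-1; queue 0 stays
 * the fallback and never receives RSS-steered traffic
 */
static void rss_fill_indirection(unsigned char *table, int n, int num_rxqs)
{
	for (int i = 0; i < n; i++)
		table[i] = 1 + (i % (num_rxqs - 1));
}
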
@@ -99,17 +99,23 @@ static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
}
}
+struct iwl_mld_alive_data {
+ __le32 sku_id[3];
+ bool valid;
+};
+
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ unsigned int expected_sz;
struct iwl_mld *mld =
container_of(notif_wait, struct iwl_mld, notif_wait);
struct iwl_trans *trans = mld->trans;
u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
UCODE_ALIVE_NTFY, 0);
- struct iwl_alive_ntf_v6 *palive;
- bool *alive_valid = data;
+ struct iwl_mld_alive_data *alive_data = data;
+ struct iwl_alive_ntf *palive;
struct iwl_umac_alive *umac;
struct iwl_lmac_alive *lmac1;
struct iwl_lmac_alive *lmac2 = NULL;
@@ -117,7 +123,19 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
u32 umac_error_table;
u16 status;
- if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
+ switch (version) {
+ case 6:
+ case 7:
+ expected_sz = sizeof(struct iwl_alive_ntf_v6);
+ break;
+ case 8:
+ expected_sz = sizeof(struct iwl_alive_ntf);
+ break;
+ default:
+ return false;
+ }
+
+ if (pkt_len != expected_sz)
return false;
palive = (void *)pkt->data;
@@ -129,12 +147,15 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
lmac2 = &palive->lmac_data[1];
status = le16_to_cpu(palive->status);
- trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
- trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
- trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+ BUILD_BUG_ON(sizeof(alive_data->sku_id) !=
+ sizeof(palive->sku_id.data));
+ memcpy(alive_data->sku_id, palive->sku_id.data,
+ sizeof(palive->sku_id.data));
IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
- trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);
+ le32_to_cpu(alive_data->sku_id[0]),
+ le32_to_cpu(alive_data->sku_id[1]),
+ le32_to_cpu(alive_data->sku_id[2]));
lmac_error_event_table =
le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
@@ -147,13 +168,13 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
~FW_ADDR_CACHE_CONTROL;
- if (umac_error_table >= trans->cfg->min_umac_error_event_table)
+ if (umac_error_table >= trans->mac_cfg->base->min_umac_error_event_table)
iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
else
IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
umac_error_table);
- *alive_valid = status == IWL_ALIVE_STATUS_OK;
+ alive_data->valid = status == IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mld,
"Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
@@ -171,6 +192,10 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
le16_to_cpu(palive->flags));
+ if (version >= 8)
+ IWL_DEBUG_FW(mld, "platform_id 0x%llx\n",
+ le64_to_cpu(palive->platform_id));
+
iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);
return true;
@@ -208,24 +233,22 @@ static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
pc_data->pc_address);
}
-static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
+static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld,
+ struct iwl_mld_alive_data *alive_data)
{
- const struct fw_img *fw =
- iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
struct iwl_notification_wait alive_wait;
- bool alive_valid = false;
int ret;
lockdep_assert_wiphy(mld->wiphy);
iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
alive_cmd, ARRAY_SIZE(alive_cmd),
- iwl_alive_fn, &alive_valid);
+ iwl_alive_fn, alive_data);
iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
- ret = iwl_trans_start_fw(mld->trans, fw, true);
+ ret = iwl_trans_start_fw(mld->trans, mld->fw, IWL_UCODE_REGULAR, true);
if (ret) {
iwl_remove_notification(&mld->notif_wait, &alive_wait);
return ret;
@@ -239,28 +262,26 @@ static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
iwl_fw_dbg_error_collect(&mld->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
iwl_mld_print_alive_notif_timeout(mld);
- goto alive_failure;
+ return ret;
}
- if (!alive_valid) {
+ if (!alive_data->valid) {
IWL_ERR(mld, "Loaded firmware is not valid!\n");
- ret = -EIO;
- goto alive_failure;
+ return -EIO;
}
- iwl_trans_fw_alive(mld->trans, 0);
+ iwl_trans_fw_alive(mld->trans);
return 0;
-
-alive_failure:
- iwl_trans_stop_device(mld->trans);
- return ret;
}
static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
struct iwl_notification_wait init_wait;
- struct iwl_init_extended_cfg_cmd init_cfg = {};
+ struct iwl_init_extended_cfg_cmd init_cfg = {
+ .init_flags = cpu_to_le32(BIT(IWL_INIT_PHY)),
+ };
+ struct iwl_mld_alive_data alive_data = {};
static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
};
@@ -268,19 +289,15 @@ static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
lockdep_assert_wiphy(mld->wiphy);
- ret = iwl_mld_load_fw_wait_alive(mld);
+ ret = iwl_mld_load_fw_wait_alive(mld, &alive_data);
if (ret)
return ret;
- mld->trans->step_urm =
- !!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) &
- CNVI_PMU_STEP_FLOW_FORCE_URM);
-
ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
- &mld->fw->ucode_capa);
+ &mld->fw->ucode_capa, alive_data.sku_id);
if (ret) {
IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
- goto init_failure;
+ return ret;
}
iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
@@ -298,31 +315,24 @@ static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
if (ret) {
IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
iwl_remove_notification(&mld->notif_wait, &init_wait);
- goto init_failure;
+ return ret;
+ }
+
+ ret = iwl_mld_send_phy_cfg_cmd(mld);
+ if (ret) {
+ IWL_ERR(mld, "Failed to send PHY config command: %d\n", ret);
+ iwl_remove_notification(&mld->notif_wait, &init_wait);
+ return ret;
}
ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
MLD_INIT_COMPLETE_TIMEOUT);
if (ret) {
IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
- goto init_failure;
- }
-
- if (!mld->nvm_data) {
- mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
- if (IS_ERR(mld->nvm_data)) {
- ret = PTR_ERR(mld->nvm_data);
- mld->nvm_data = NULL;
- IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
- goto init_failure;
- }
+ return ret;
}
return 0;
-
-init_failure:
- iwl_trans_stop_device(mld->trans);
- return ret;
}
int iwl_mld_load_fw(struct iwl_mld *mld)
@@ -333,7 +343,7 @@ int iwl_mld_load_fw(struct iwl_mld *mld)
ret = iwl_trans_start_hw(mld->trans);
if (ret)
- goto err;
+ return ret;
ret = iwl_mld_run_fw_init_sequence(mld);
if (ret)
@@ -361,9 +371,10 @@ void iwl_mld_stop_fw(struct iwl_mld *mld)
iwl_trans_stop_device(mld->trans);
- wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
-
- iwl_mld_purge_async_handlers_list(mld);
+	/* HW is stopped, no more RX is coming. Cancel all notifications in
+ * case they were sent just before stopping the HW.
+ */
+ iwl_mld_cancel_async_notifications(mld);
mld->fw_status.running = false;
}
@@ -526,7 +537,7 @@ int iwl_mld_start_fw(struct iwl_mld *mld)
ret = iwl_mld_load_fw(mld);
if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
- goto error;
+ return ret;
}
IWL_DEBUG_INFO(mld, "uCode started.\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
index e49e2260ac05..235b55e0fe59 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -22,9 +22,17 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
struct iwl_mld *mld = mld_vif->mld;
struct iwl_mld_link *link;
+ mld_vif->emlsr.blocked_reasons &= ~IWL_MLD_EMLSR_BLOCKED_ROC;
+
+ if (mld_vif->aux_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &mld_vif->aux_sta);
+
/* EMLSR is turned back on during recovery */
vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+ if (mld_vif->roc_activity != ROC_NUM_ACTIVITIES)
+ ieee80211_remain_on_channel_expired(mld->hw);
+
mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
for_each_mld_vif_valid_link(mld_vif, link) {
@@ -103,6 +111,24 @@ static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
IEEE80211_HE_MAC_CAP2_ACK_EN);
}
+static void iwl_mld_set_he_support(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd,
+ int cmd_ver)
+{
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_ap_support = 1;
+ } else {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_support = 1;
+ }
+}
+
/* fill the common part for all interface types */
static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
struct ieee80211_vif *vif,
@@ -112,6 +138,9 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
struct ieee80211_bss_conf *link_conf;
unsigned int link_id;
+ int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(MAC_CONF_GROUP,
+ MAC_CONFIG_CMD), 0);
lockdep_assert_wiphy(mld->wiphy);
@@ -138,12 +167,11 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
* and enable both when we have MLO.
*/
if (ieee80211_vif_is_mld(vif)) {
- if (vif->type == NL80211_IFTYPE_AP)
- cmd->he_ap_support = cpu_to_le16(1);
+ iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
else
- cmd->he_support = cpu_to_le16(1);
-
- cmd->eht_support = cpu_to_le32(1);
+ cmd->wifi_gen.eht_support = 1;
return;
}
@@ -151,10 +179,7 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
if (!link_conf->he_support)
continue;
- if (vif->type == NL80211_IFTYPE_AP)
- cmd->he_ap_support = cpu_to_le16(1);
- else
- cmd->he_support = cpu_to_le16(1);
+ iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
/* EHT, if supported, was already set above */
break;
@@ -226,11 +251,6 @@ static void iwl_mld_fill_mac_cmd_sta(struct iwl_mld *mld,
if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
cmd->filter_flags |=
cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
-
- if (vif->p2p)
- cmd->client.ctwin =
- cpu_to_le32(vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
- IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
}
static void iwl_mld_fill_mac_cmd_ap(struct iwl_mld *mld,
@@ -393,6 +413,7 @@ iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk,
iwl_mld_emlsr_tmp_non_bss_done_wk);
}
+ iwl_mld_init_internal_sta(&mld_vif->aux_sta);
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
index ec14d0736cee..49e2ce65557d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -10,6 +10,7 @@
#include "link.h"
#include "session-protect.h"
#include "d3.h"
+#include "fw/api/time-event.h"
enum iwl_mld_cca_40mhz_wa_status {
CCA_40_MHZ_WA_NONE,
@@ -59,6 +60,7 @@ enum iwl_mld_emlsr_blocked {
* loaded enough to justify EMLSR.
* @IWL_MLD_EMLSR_EXIT_RFI: Exit EMLSR due to RFI
* @IWL_MLD_EMLSR_EXIT_FW_REQUEST: Exit EMLSR because the FW requested it
+ * @IWL_MLD_EMLSR_EXIT_INVALID: internal exit reason due to invalid data
*/
enum iwl_mld_emlsr_exit {
IWL_MLD_EMLSR_EXIT_BLOCK = 0x1,
@@ -72,6 +74,7 @@ enum iwl_mld_emlsr_exit {
IWL_MLD_EMLSR_EXIT_CHAN_LOAD = 0x100,
IWL_MLD_EMLSR_EXIT_RFI = 0x200,
IWL_MLD_EMLSR_EXIT_FW_REQUEST = 0x400,
+ IWL_MLD_EMLSR_EXIT_INVALID = 0x800,
};
/**
@@ -123,8 +126,6 @@ struct iwl_mld_emlsr {
* Only valid for STA. (FIXME: needs to be per link)
* @num_associated_stas: number of associated STAs. Relevant only for AP mode.
* @ap_ibss_active: whether the AP/IBSS was started
- * @roc_activity: the id of the roc_activity running. Relevant for p2p device
- * only. Set to %ROC_NUM_ACTIVITIES when not in use.
* @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
* environment is too loaded, we work around this by reconnecting to the
* same AP with 20 MHz. This manages the status of the workaround.
@@ -140,6 +141,9 @@ struct iwl_mld_emlsr {
* @use_ps_poll: use ps_poll frames
* @disable_bf: disable beacon filter
* @dbgfs_slink: debugfs symlink for this interface
+ * @roc_activity: the id of the roc_activity running. Relevant for STA and
+ * p2p device only. Set to %ROC_NUM_ACTIVITIES when not in use.
+ * @aux_sta: station used for remain on channel. Used in P2P device.
*/
struct iwl_mld_vif {
/* Add here fields that need clean up on restart */
@@ -151,7 +155,6 @@ struct iwl_mld_vif {
struct ieee80211_key_conf __rcu *bigtks[2];
u8 num_associated_stas;
bool ap_ibss_active;
- u32 roc_activity;
enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool beacon_inject_active;
@@ -174,6 +177,8 @@ struct iwl_mld_vif {
bool disable_bf;
struct dentry *dbgfs_slink;
#endif
+ enum iwl_roc_activity roc_activity;
+ struct iwl_mld_int_sta aux_sta;
};
static inline struct iwl_mld_vif *
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index 82a4979a3af3..d0f56189ad3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -580,7 +580,7 @@ iwl_mld_get_omi_bw_reduction_pointers(struct iwl_mld *mld,
*link_sta = NULL;
- if (mld->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_SC)
+ if (mld->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
return NULL;
vif = iwl_mld_get_bss_vif(mld);
@@ -782,10 +782,11 @@ iwl_mld_init_link(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
iwl_mld_init_internal_sta(&mld_link->bcast_sta);
iwl_mld_init_internal_sta(&mld_link->mcast_sta);
- iwl_mld_init_internal_sta(&mld_link->aux_sta);
+ iwl_mld_init_internal_sta(&mld_link->mon_sta);
- wiphy_delayed_work_init(&mld_link->rx_omi.finished_work,
- iwl_mld_omi_bw_finished_work);
+ if (!mld->fw_status.in_hw_restart)
+ wiphy_delayed_work_init(&mld_link->rx_omi.finished_work,
+ iwl_mld_omi_bw_finished_work);
return iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h
index 42b7bdcbd741..39f04aae5579 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h
@@ -39,7 +39,7 @@ struct iwl_probe_resp_data {
* @vif: the vif this link belongs to
* @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
* @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
- * @aux_sta: station used for remain on channel. Used in P2P device.
+ * @mon_sta: station used for TX injection in monitor interface.
* @link_id: over the air link ID
* @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs,
* but higher layers work differently, so we store the keys here for
@@ -72,7 +72,7 @@ struct iwl_mld_link {
struct ieee80211_vif *vif;
struct iwl_mld_int_sta bcast_sta;
struct iwl_mld_int_sta mcast_sta;
- struct iwl_mld_int_sta aux_sta;
+ struct iwl_mld_int_sta mon_sta;
u8 link_id;
struct {
@@ -89,7 +89,7 @@ struct iwl_mld_link {
struct iwl_probe_resp_data __rcu *probe_resp_data;
};
-/* Cleanup function for struct iwl_mld_phy, will be called in restart */
+/* Cleanup function for struct iwl_mld_link, will be called in restart */
static inline void
iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link)
{
@@ -105,8 +105,8 @@ iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link)
iwl_mld_free_internal_sta(mld, &link->bcast_sta);
if (link->mcast_sta.sta_id != IWL_INVALID_STA)
iwl_mld_free_internal_sta(mld, &link->mcast_sta);
- if (link->aux_sta.sta_id != IWL_INVALID_STA)
- iwl_mld_free_internal_sta(mld, &link->aux_sta);
+ if (link->mon_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &link->mon_sta);
}
/* Convert a percentage from [0,100] to [0,255] */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
index a4a612afb3b3..f7faa87b8ba6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
@@ -21,7 +21,7 @@ static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
{
struct iwl_mld_low_latency *ll = &mld->low_latency;
bool global_low_latency = false;
- u8 num_rx_q = mld->trans->num_rx_queues;
+ u8 num_rx_q = mld->trans->info.num_rxqs;
for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
u32 total_vo_vi_pkts = 0;
@@ -131,12 +131,12 @@ int iwl_mld_low_latency_init(struct iwl_mld *mld)
struct iwl_mld_low_latency *ll = &mld->low_latency;
unsigned long ts = jiffies;
- ll->pkts_counters = kcalloc(mld->trans->num_rx_queues,
+ ll->pkts_counters = kcalloc(mld->trans->info.num_rxqs,
sizeof(*ll->pkts_counters), GFP_KERNEL);
if (!ll->pkts_counters)
return -ENOMEM;
- for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++)
spin_lock_init(&ll->pkts_counters[q].lock);
wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);
@@ -167,7 +167,7 @@ void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
memset(ll->window_start, 0, sizeof(ll->window_start));
memset(ll->result, 0, sizeof(ll->result));
- for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++)
memset(ll->pkts_counters[q].vo_vi, 0,
sizeof(ll->pkts_counters[q].vo_vi));
}
@@ -276,7 +276,7 @@ void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
return;
if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
- queue >= mld->trans->num_rx_queues))
+ queue >= mld->trans->info.num_rxqs))
return;
if (mld->low_latency.stopped)
@@ -324,7 +324,7 @@ void iwl_mld_low_latency_restart(struct iwl_mld *mld)
ll->window_start[mac] = 0;
low_latency |= ll->result[mac];
- for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
spin_lock_bh(&ll->pkts_counters[q].lock);
ll->pkts_counters[q].vo_vi[mac] = 0;
spin_unlock_bh(&ll->pkts_counters[q].lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index 68d97d3b8f02..4ba050397632 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -243,7 +243,6 @@ static void iwl_mac_hw_set_flags(struct iwl_mld *mld)
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
- ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
@@ -305,7 +304,7 @@ static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
wiphy->max_remain_on_channel_duration = 10000;
- wiphy->hw_version = mld->trans->hw_id;
+ wiphy->hw_version = mld->trans->info.hw_id;
wiphy->hw_timestamp_max_peers = 1;
@@ -351,9 +350,9 @@ static void iwl_mac_hw_set_misc(struct iwl_mld *mld)
hw->queues = IEEE80211_NUM_ACS;
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
- hw->netdev_features |= mld->cfg->features;
+ hw->netdev_features |= mld->trans->mac_cfg->base->features;
- hw->max_tx_fragments = mld->trans->max_skb_frags;
+ hw->max_tx_fragments = mld->trans->info.max_skb_frags;
hw->max_listen_interval = IWL_MLD_CONN_LISTEN_INTERVAL;
hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
@@ -376,6 +375,24 @@ static void iwl_mac_hw_set_misc(struct iwl_mld *mld)
static int iwl_mld_hw_verify_preconditions(struct iwl_mld *mld)
{
+ int ratecheck;
+
+ /* check for rates version 3 */
+ ratecheck =
+ (iwl_fw_lookup_cmd_ver(mld->fw, TX_CMD, 0) >= 11) +
+ (iwl_fw_lookup_notif_ver(mld->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) >= 6) +
+ (iwl_fw_lookup_notif_ver(mld->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP, TX_CMD, 0) >= 9);
+
+ if (ratecheck != 0 && ratecheck != 5) {
+ IWL_ERR(mld, "Firmware has inconsistent rates\n");
+ return -EINVAL;
+ }
+
/* 11ax is expected to be enabled for all supported devices */
if (WARN_ON(!mld->nvm_data->sku_cap_11ax_enable))
return -EINVAL;
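
The ratecheck sum above is an all-or-nothing consistency probe: each of the five version lookups contributes 1 when that API already speaks rate format v3, so any total other than 0 or 5 means the firmware mixes rate formats. A distilled sketch, with the thresholds copied from the hunk above:

#include <stdbool.h>

static bool fw_rates_consistent(int tx_ver, int tlc_notif_ver,
				int rx_mpdu_ver, int no_data_ver,
				int long_tx_ver)
{
	int ratecheck = (tx_ver >= 11) + (tlc_notif_ver >= 4) +
			(rx_mpdu_ver >= 6) + (no_data_ver >= 4) +
			(long_tx_ver >= 9);

	/* 0 = all legacy (v2), 5 = all v3; anything else is inconsistent */
	return ratecheck == 0 || ratecheck == 5;
}
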
@@ -541,15 +558,6 @@ void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
(IS_ENABLED(CONFIG_PM_SLEEP) && iwl_mld_no_wowlan_suspend(mld)))
iwl_mld_stop_fw(mld);
- /* HW is stopped, no more coming RX. OTOH, the worker can't run as the
- * wiphy lock is held. Cancel it in case it was scheduled just before
- * we stopped the HW.
- */
- wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
-
- /* Empty out the list, as the worker won't do that */
- iwl_mld_purge_async_handlers_list(mld);
-
/* Clear in_hw_restart flag when stopping the hw, as mac80211 won't
* execute the restart.
*/
@@ -897,9 +905,8 @@ void iwl_mld_change_chanctx(struct ieee80211_hw *hw,
return;
}
update:
- phy->chandef = *chandef;
- iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY);
+ iwl_mld_update_phy_chandef(mld, ctx);
}
static u8
@@ -1031,12 +1038,19 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);
if (vif->type == NL80211_IFTYPE_MONITOR) {
- /* TODO: task=sniffer add sniffer station */
+ ret = iwl_mld_add_mon_sta(mld, vif, link);
+ if (ret)
+ goto deactivate_link;
+
mld->monitor.p80 =
iwl_mld_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
}
return 0;
+
+deactivate_link:
+ if (mld_link->active)
+ iwl_mld_deactivate_link(mld, link);
err:
RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
return ret;
@@ -1062,7 +1076,8 @@ void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mld_deactivate_link(mld, link);
- /* TODO: task=sniffer remove sniffer station */
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ iwl_mld_remove_mon_sta(mld, vif, link);
if (n_active > 1) {
/* Indicate to mac80211 that EML is disabled */
@@ -1258,9 +1273,14 @@ iwl_mld_mac80211_link_info_changed(struct ieee80211_hw *hw,
}
static void
-iwl_mld_smps_wa(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable)
+iwl_mld_smps_workaround(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ bool workaround_required =
+ iwl_fw_lookup_cmd_ver(mld->fw, MAC_PM_POWER_TABLE, 0) < 2;
+
+ if (!workaround_required)
+ return;
/* Send the device-level power commands since the
* firmware checks the POWER_TABLE_CMD's POWER_SAVE_EN bit to
@@ -1307,7 +1327,7 @@ void iwl_mld_mac80211_vif_cfg_changed(struct ieee80211_hw *hw,
}
if (changes & BSS_CHANGED_PS) {
- iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ iwl_mld_smps_workaround(mld, vif, vif->cfg.ps);
iwl_mld_update_mac_power(mld, vif, false);
}
@@ -1320,13 +1340,22 @@ iwl_mld_mac80211_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
if (WARN_ON(!hw_req->req.n_channels ||
hw_req->req.n_channels >
mld->fw->ucode_capa.n_scan_channels))
return -EINVAL;
- return iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies);
+ ret = iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies);
+ if (!ret) {
+		/* We will be busy scanning, so the counters may not
+		 * reflect reality. Stop checking them until the scan ends.
+ */
+ iwl_mld_start_ignoring_tpt_updates(mld);
+ }
+
+ return ret;
}
static void
@@ -1342,8 +1371,11 @@ iwl_mld_mac80211_cancel_hw_scan(struct ieee80211_hw *hw,
* cancel scan before ieee80211_scan_work() could run.
* To handle that, simply return if the scan is not running.
*/
- if (mld->scan.status & IWL_MLD_SCAN_REGULAR)
+ if (mld->scan.status & IWL_MLD_SCAN_REGULAR) {
iwl_mld_scan_stop(mld, IWL_MLD_SCAN_REGULAR, true);
+		/* Scan is over, we can check the tpt counters again */
+ iwl_mld_stop_ignoring_tpt_updates(mld);
+ }
}
static int
@@ -1720,7 +1752,7 @@ static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
FW_CTXT_ACTION_MODIFY);
if (ret)
return ret;
- iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ iwl_mld_smps_workaround(mld, vif, vif->cfg.ps);
}
/* MFP is set by default before the station is authorized.
@@ -1763,7 +1795,7 @@ static int iwl_mld_move_sta_state_down(struct iwl_mld *mld,
&mld_vif->emlsr.check_tpt_wk);
iwl_mld_reset_cca_40mhz_workaround(mld, vif);
- iwl_mld_smps_wa(mld, vif, true);
+ iwl_mld_smps_workaround(mld, vif, true);
}
/* once we move into assoc state, need to update the FW to
@@ -2004,7 +2036,7 @@ static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld,
struct ieee80211_key_conf *key,
struct iwl_mld_ptk_pn **ptk_pn)
{
- u8 num_rx_queues = mld->trans->num_rx_queues;
+ u8 num_rx_queues = mld->trans->info.num_rxqs;
int keyidx = key->keyidx;
struct ieee80211_key_seq seq;
@@ -2460,15 +2492,17 @@ iwl_mld_change_vif_links(struct ieee80211_hw *hw,
added |= BIT(0);
for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- if (removed & BIT(i))
+ if (removed & BIT(i) && !WARN_ON(!old[i]))
iwl_mld_remove_link(mld, old[i]);
}
for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
if (added & BIT(i)) {
link_conf = link_conf_dereference_protected(vif, i);
- if (WARN_ON(!link_conf))
- return -EINVAL;
+ if (!link_conf) {
+ err = -EINVAL;
+ goto remove_added_links;
+ }
err = iwl_mld_add_link(mld, link_conf);
if (err)
@@ -2503,7 +2537,11 @@ remove_added_links:
iwl_mld_remove_link(mld, link_conf);
}
- return err;
+ if (WARN_ON(!iwl_mld_error_before_recovery(mld)))
+ return err;
+
+ /* reconfig will fix us anyway */
+ return 0;
}
static int iwl_mld_change_sta_links(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
index daca14e208bd..19cb562e7a73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <net/cfg80211.h>
@@ -158,7 +158,7 @@ iwl_mld_get_regdomain(struct iwl_mld *mld,
}
IWL_DEBUG_LAR(mld, "MCC update response version: %d\n", resp_ver);
- regd = iwl_parse_nvm_mcc_info(mld->trans->dev, mld->cfg,
+ regd = iwl_parse_nvm_mcc_info(mld->trans,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index 73d2166a4c25..8cdd960c5245 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -26,6 +26,8 @@
#include "hcmd.h"
#include "fw/api/location.h"
+#include "iwl-nvm-parse.h"
+
#define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
@@ -60,7 +62,7 @@ static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld)
VISIBLE_IF_IWLWIFI_KUNIT
void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
- const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw,
struct ieee80211_hw *hw, struct dentry *dbgfs_dir)
{
mld->dev = trans->dev;
@@ -75,7 +77,6 @@ void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
/* Setup async RX handling */
spin_lock_init(&mld->async_handlers_lock);
- INIT_LIST_HEAD(&mld->async_handlers_list);
wiphy_work_init(&mld->async_handlers_wk,
iwl_mld_async_handlers_wk);
@@ -192,6 +193,7 @@ static const struct iwl_hcmd_names iwl_mld_system_names[] = {
HCMD_NAME(SOC_CONFIGURATION_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
HCMD_NAME(FW_ERROR_RECOVERY_CMD),
+ HCMD_NAME(RFI_CONFIG_CMD),
HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
HCMD_NAME(SYSTEM_STATISTICS_CMD),
HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
@@ -287,7 +289,9 @@ static const struct iwl_hcmd_names iwl_mld_statistics_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = {
- HCMD_NAME(STORED_BEACON_NTF),
+ HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION),
+ HCMD_NAME(WOWLAN_INFO_NOTIFICATION),
+ HCMD_NAME(D3_END_NOTIFICATION),
};
/* Please keep this array *SORTED* by hex value.
@@ -322,33 +326,40 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size);
static void
iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
{
- const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
static const u8 no_reclaim_cmds[] = {TX_CMD};
- struct iwl_trans_config trans_cfg = {
- .op_mode = op_mode,
- /* Rx is not supported yet, but add it to avoid warnings */
- .rx_buf_size = iwl_amsdu_size_to_rxb_size(),
- .command_groups = iwl_mld_groups,
- .command_groups_size = ARRAY_SIZE(iwl_mld_groups),
- .fw_reset_handshake = true,
- .queue_alloc_cmd_ver =
- iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(DATA_PATH_GROUP,
- SCD_QUEUE_CONFIG_CMD),
- 0),
- .no_reclaim_cmds = no_reclaim_cmds,
- .n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
- .cb_data_offs = offsetof(struct ieee80211_tx_info,
- driver_data[2]),
- };
struct iwl_trans *trans = mld->trans;
+ u32 eckv_value;
- trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->iml = mld->fw->iml;
- trans->iml_len = mld->fw->iml_len;
- trans->wide_cmd_header = true;
+ iwl_bios_setup_step(trans, &mld->fwrt);
+ iwl_uefi_get_step_table(trans);
- iwl_trans_configure(trans, &trans_cfg);
+ if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
+ IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
+ else
+ trans->conf.ext_32khz_clock_valid = !!eckv_value;
+
+ trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size();
+ trans->conf.command_groups = iwl_mld_groups;
+ trans->conf.command_groups_size = ARRAY_SIZE(iwl_mld_groups);
+ trans->conf.fw_reset_handshake = true;
+ trans->conf.queue_alloc_cmd_ver =
+ iwl_fw_lookup_cmd_ver(mld->fw, WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ 0);
+ trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
+ BUILD_BUG_ON(sizeof(no_reclaim_cmds) >
+ sizeof(trans->conf.no_reclaim_cmds));
+ memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
+ sizeof(no_reclaim_cmds));
+ trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+ trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+ trans->conf.wide_cmd_header = true;
+
+ iwl_trans_op_mode_enter(trans, op_mode);
}
/*
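
One detail worth noting in the conversion above: no_reclaim_cmds moves from a pointer in a separate config struct to a fixed array inside trans->conf, with a compile-time size guard before the memcpy(). A hedged sketch of that guard (array capacity and command value are illustrative):

#include <string.h>

#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct trans_conf {
	unsigned short no_reclaim_cmds[8];	/* illustrative capacity */
	int n_no_reclaim_cmds;
};

static void conf_set_no_reclaim(struct trans_conf *conf)
{
	static const unsigned short cmds[] = { 0x1c };	/* e.g. TX_CMD */

	/* refuse to build if the list ever outgrows the fixed array */
	BUILD_BUG_ON(sizeof(cmds) > sizeof(conf->no_reclaim_cmds));
	memcpy(conf->no_reclaim_cmds, cmds, sizeof(cmds));
	conf->n_no_reclaim_cmds = sizeof(cmds) / sizeof(cmds[0]);
}
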
@@ -359,13 +370,12 @@ iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
#define NUM_FW_LOAD_RETRIES 3
static struct iwl_op_mode *
-iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
struct ieee80211_hw *hw;
struct iwl_op_mode *op_mode;
struct iwl_mld *mld;
- u32 eckv_value;
int ret;
/* Allocate and initialize a new hardware device */
@@ -383,16 +393,13 @@ iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir);
+	/* we'll verify later that it matches between commands */
+ mld->fw_rates_ver_3 = iwl_fw_lookup_cmd_ver(mld->fw, TX_CMD, 0) >= 11;
+
iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir);
iwl_mld_get_bios_tables(mld);
iwl_uefi_get_sgom_table(trans, &mld->fwrt);
- iwl_uefi_get_step_table(trans);
- if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
- IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
- else
- trans->ext_32khz_clock_valid = !!eckv_value;
- iwl_bios_setup_step(trans, &mld->fwrt);
mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt);
iwl_mld_hw_set_regulatory(mld);
@@ -411,10 +418,17 @@ iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
break;
}
+ if (!ret) {
+ mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
+ if (IS_ERR(mld->nvm_data)) {
+ IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
+ ret = PTR_ERR(mld->nvm_data);
+ }
+ }
+
if (ret) {
wiphy_unlock(mld->wiphy);
rtnl_unlock();
- iwl_fw_flush_dumps(&mld->fwrt);
goto err;
}
@@ -475,8 +489,9 @@ iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode)
iwl_mld_ptp_remove(mld);
iwl_mld_leds_exit(mld);
- wiphy_lock(mld->wiphy);
iwl_mld_thermal_exit(mld);
+
+ wiphy_lock(mld->wiphy);
iwl_mld_low_latency_stop(mld);
iwl_mld_deinit_time_sync(mld);
wiphy_unlock(mld->wiphy);
@@ -664,6 +679,13 @@ static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode,
{
struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+	/* A SW reset can happen for a TOP error without a NIC error, so
+	 * also abort the scan and set in_hw_restart here; when we had a
+	 * NIC error, both were already done.
+ */
+ iwl_mld_report_scan_aborted(mld);
+ mld->fw_status.in_hw_restart = true;
+
	/* Do restart only if the following conditions are met:
* - we consider the FW as running
* - The trigger that brought us here is defined as one that requires
@@ -692,7 +714,6 @@ static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
wiphy_lock(mld->wiphy);
- mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_mld_stop_fw(mld);
mld->fw_status.in_d3 = false;
wiphy_unlock(mld->wiphy);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
index a4a16da6ebf3..1a2c44f44eff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
@@ -116,6 +116,7 @@
* @monitor.ampdu_toggle: the state of the previous packet to track A-MPDU
* @monitor.cur_aid: current association id tracked by the sniffer
* @monitor.cur_bssid: current bssid tracked by the sniffer
+ * @monitor.ptp_time: set the Rx mactime using the device's PTP clock time
 * @monitor.p80: primary channel position relative to the whole bandwidth, in
* steps of 80 MHz
* @fw_id_to_link_sta: maps a fw id of a sta to the corresponding
@@ -126,7 +127,6 @@
* cleanup using iwl_mld_free_internal_sta
* @netdetect: indicates the FW is in suspend mode with netdetect configured
* @p2p_device_vif: points to the p2p device vif if exists
- * @bt_is_active: indicates that BT is active
* @dev: pointer to device struct. For printing purposes
* @trans: pointer to the transport layer
* @cfg: pointer to the device configuration
@@ -154,6 +154,8 @@
* @radio_kill: bitmap of radio kill status
* @radio_kill.hw: radio is killed by hw switch
 * @radio_kill.ct: radio is killed because the device is too hot
+ * @power_budget_mw: maximum cTDP power budget as defined for this system and
+ * device
* @addresses: device MAC addresses.
* @scan: instance of the scan object
* @wowlan: WoWLAN support data.
@@ -174,6 +176,7 @@
* @mcast_filter_cmd: pointer to the multicast filter command.
* @mgmt_tx_ant: stores the last TX antenna index; used for setting
* TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure).
+ * @fw_rates_ver_3: FW rates are in version 3
* @low_latency: low-latency manager.
* @tzone: thermal zone device's data
* @cooling_dev: cooling device's related data
@@ -184,6 +187,7 @@
* @ptp_data: data of the PTP clock
* @time_sync: time sync data.
* @ftm_initiator: FTM initiator data
+ * @last_bt_notif: last received BT Coex notif
*/
struct iwl_mld {
/* Add here fields that need clean up on restart */
@@ -201,19 +205,20 @@ struct iwl_mld {
#ifdef CONFIG_IWLWIFI_DEBUGFS
__le16 cur_aid;
u8 cur_bssid[ETH_ALEN];
+ bool ptp_time;
#endif
} monitor;
#ifdef CONFIG_PM_SLEEP
bool netdetect;
#endif /* CONFIG_PM_SLEEP */
struct ieee80211_vif *p2p_device_vif;
- bool bt_is_active;
+ struct iwl_bt_coex_profile_notif last_bt_notif;
);
struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
/* And here fields that survive a fw restart */
struct device *dev;
struct iwl_trans *trans;
- const struct iwl_cfg *cfg;
+ const struct iwl_rf_cfg *cfg;
const struct iwl_fw *fw;
struct ieee80211_hw *hw;
struct wiphy *wiphy;
@@ -241,6 +246,8 @@ struct iwl_mld {
ct:1;
} radio_kill;
+ u32 power_budget_mw;
+
struct mac_address addresses[IWL_MLD_MAX_ADDRESSES];
struct iwl_mld_scan scan;
#ifdef CONFIG_PM_SLEEP
@@ -266,6 +273,8 @@ struct iwl_mld {
u8 mgmt_tx_ant;
+ bool fw_rates_ver_3;
+
struct iwl_mld_low_latency low_latency;
bool ibss_manager;
@@ -286,7 +295,7 @@ struct iwl_mld {
memset((void *)&(_ptr)->zeroed_on_hw_restart, 0, \
sizeof((_ptr)->zeroed_on_hw_restart))
-/* Cleanup function for struct iwl_mld_vif, will be called in restart */
+/* Cleanup function for struct iwl_mld, will be called in restart */
static inline void
iwl_cleanup_mld(struct iwl_mld *mld)
{
@@ -410,7 +419,7 @@ iwl_mld_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
bool is_lb = band == NL80211_BAND_2GHZ;
- if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
return is_lb ? rate + IWL_FIRST_OFDM_RATE : rate;
/* CCK is not allowed in 5 GHz */
@@ -482,7 +491,7 @@ iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
struct ieee80211_rx_status *rx_status, int queue);
void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
- const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ const struct iwl_rf_cfg *cfg, const struct iwl_fw *fw,
struct ieee80211_hw *hw, struct dentry *dbgfs_dir);
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
index a870e169e265..dba5379ed009 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -52,7 +52,8 @@ static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
HOW(BT_COEX) \
HOW(CHAN_LOAD) \
HOW(RFI) \
- HOW(FW_REQUEST)
+ HOW(FW_REQUEST) \
+ HOW(INVALID)
static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
@@ -325,23 +326,44 @@ static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- struct iwl_esr_mode_notif *notif = (void *)data;
+ const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ enum iwl_mvm_fw_esr_recommendation action;
+ const struct iwl_esr_mode_notif *notif = NULL;
+
+ if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP,
+ ESR_MODE_NOTIF, 0) > 1) {
+ notif = (void *)data;
+ action = le32_to_cpu(notif->action);
+ } else {
+ const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data;
+
+ action = le32_to_cpu(notif_v1->action);
+ }
if (!iwl_mld_vif_has_emlsr_cap(vif))
return;
- switch (le32_to_cpu(notif->action)) {
+ switch (action) {
case ESR_RECOMMEND_LEAVE:
+ if (notif)
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW recommend leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
+
iwl_mld_exit_emlsr(mld_vif->mld, vif,
IWL_MLD_EMLSR_EXIT_FW_REQUEST,
iwl_mld_get_primary_link(vif));
break;
- case ESR_RECOMMEND_ENTER:
case ESR_FORCE_LEAVE:
+ if (notif)
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW force leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
+ fallthrough;
+ case ESR_RECOMMEND_ENTER:
default:
IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
- le32_to_cpu(notif->action));
+ action);
}
}
@@ -523,7 +545,7 @@ void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
}
/* Sum up RX and TX MPDUs from the different queues/links */
- for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
struct iwl_mld_per_q_mpdu_counter *queue_counter =
&mld_sta->mpdu_counters[q];
@@ -635,6 +657,42 @@ s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
#undef RSSI_THRESHOLD
}
+#define IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH -69
+#define IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH -63
+#define IWL_MLD_BT_COEX_WIFI_LOSS_THRESH 7
+
+VISIBLE_IF_IWLWIFI_KUNIT
+bool
+iwl_mld_bt_allows_emlsr(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
+ bool check_entry)
+{
+ int bt_penalty, rssi_thresh;
+ s32 link_rssi;
+
+ if (WARN_ON_ONCE(!link->bss))
+ return false;
+
+ link_rssi = MBM_TO_DBM(link->bss->signal);
+ rssi_thresh = check_entry ?
+ IWL_MLD_BT_COEX_ENABLE_EMLSR_RSSI_THRESH :
+ IWL_MLD_BT_COEX_DISABLE_EMLSR_RSSI_THRESH;
+	/* No valid RSSI - assume low RSSI */
+ if (!link_rssi)
+ link_rssi = rssi_thresh - 1;
+
+ if (link_rssi > rssi_thresh)
+ bt_penalty = max(mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][0],
+ mld->last_bt_notif.wifi_loss_mid_high_rssi[PHY_BAND_24][1]);
+ else
+ bt_penalty = max(mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][0],
+ mld->last_bt_notif.wifi_loss_low_rssi[PHY_BAND_24][1]);
+
+ IWL_DEBUG_EHT(mld, "BT penalty for link-id %0X is %d\n",
+ link->link_id, bt_penalty);
+ return bt_penalty < IWL_MLD_BT_COEX_WIFI_LOSS_THRESH;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_bt_allows_emlsr);
+
static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
struct ieee80211_vif *vif,
@@ -643,13 +701,14 @@ iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
{
struct wiphy *wiphy = mld->wiphy;
struct ieee80211_bss_conf *conf;
- enum iwl_mld_emlsr_exit ret = 0;
+ u32 ret = 0;
conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
if (WARN_ON_ONCE(!conf))
- return false;
+ return IWL_MLD_EMLSR_EXIT_INVALID;
- if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
+ if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
+ !iwl_mld_bt_allows_emlsr(mld, conf, true))
ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;
if (link->signal <
@@ -731,7 +790,7 @@ iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
return 10;
}
-VISIBLE_IF_IWLWIFI_KUNIT bool
+static bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
struct ieee80211_vif *vif,
const struct iwl_mld_link_sel_data *a,
@@ -772,8 +831,8 @@ iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
if (a->chandef->width <= b->chandef->width)
return true;
- bw_a = nl80211_chan_width_to_mhz(a->chandef->width);
- bw_b = nl80211_chan_width_to_mhz(b->chandef->width);
+ bw_a = cfg80211_chandef_get_width(a->chandef);
+ bw_b = cfg80211_chandef_get_width(b->chandef);
ratio = bw_a / bw_b;
switch (ratio) {
@@ -788,10 +847,9 @@ iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
return false;
}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_channel_load_allows_emlsr);
-static bool
-iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
+VISIBLE_IF_IWLWIFI_KUNIT u32
+iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
struct iwl_mld_link_sel_data *a,
struct iwl_mld_link_sel_data *b)
{
@@ -800,12 +858,36 @@ iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
u32 reason_mask = 0;
/* Per-link considerations */
- if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) ||
- iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false))
- return false;
-
- if (a->chandef->chan->band == b->chandef->chan->band)
- reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
+ reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true);
+ if (reason_mask)
+ return reason_mask;
+
+ reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false);
+ if (reason_mask)
+ return reason_mask;
+
+ if (a->chandef->chan->band == b->chandef->chan->band) {
+ const struct cfg80211_chan_def *c_low = a->chandef;
+ const struct cfg80211_chan_def *c_high = b->chandef;
+ u32 c_low_upper_edge, c_high_lower_edge;
+
+ if (c_low->chan->center_freq > c_high->chan->center_freq)
+ swap(c_low, c_high);
+
+ c_low_upper_edge = c_low->chan->center_freq +
+ cfg80211_chandef_get_width(c_low) / 2;
+ c_high_lower_edge = c_high->chan->center_freq -
+ cfg80211_chandef_get_width(c_high) / 2;
+
+ if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
+ c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) {
+ /* This case is fine - HW/FW can deal with it, there's
+ * enough separation between the two channels.
+ */
+ } else {
+ reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
+ }
+ }
if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;
@@ -818,11 +900,11 @@ iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
nl80211_chan_width_to_mhz(a->chandef->width),
nl80211_chan_width_to_mhz(b->chandef->width));
iwl_mld_print_emlsr_exit(mld, reason_mask);
- return false;
}
- return true;
+ return reason_mask;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state);
/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256
@@ -850,7 +932,7 @@ unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
*primary_id = a->link_id;
- if (!iwl_mld_valid_emlsr_pair(vif, a, b))
+ if (iwl_mld_emlsr_pair_state(vif, a, b))
return 0;
primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
@@ -892,8 +974,11 @@ static void _iwl_mld_select_links(struct iwl_mld *mld,
n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
&best_idx);
- if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ if (!n_data) {
+ IWL_DEBUG_EHT(mld,
+ "Couldn't find a valid grade for any link!\n");
return;
+ }
/* Default to selecting the single best link */
best_link = &data[best_idx];
@@ -961,27 +1046,41 @@ static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ const struct iwl_bt_coex_profile_notif zero_notif = {};
struct iwl_mld *mld = mld_vif->mld;
struct ieee80211_bss_conf *link;
unsigned int link_id;
+ const struct iwl_bt_coex_profile_notif *notif = &mld->last_bt_notif;
- if (!mld->bt_is_active) {
- iwl_mld_retry_emlsr(mld, vif);
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
return;
- }
- /* BT is turned ON but we are not in EMLSR, nothing to do */
- if (!iwl_mld_emlsr_active(vif))
+ /* zeroed structure means that BT is OFF */
+ if (!memcmp(notif, &zero_notif, sizeof(*notif))) {
+ iwl_mld_retry_emlsr(mld, vif);
return;
-
- /* In EMLSR and BT is turned ON */
+ }
for_each_vif_active_link(vif, link, link_id) {
+ bool emlsr_active, emlsr_allowed;
+
if (WARN_ON(!link->chanreq.oper.chan))
continue;
- if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
- iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
+ if (link->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
+ continue;
+
+ emlsr_active = iwl_mld_emlsr_active(vif);
+ emlsr_allowed = iwl_mld_bt_allows_emlsr(mld, link,
+ !emlsr_active);
+ if (emlsr_allowed && !emlsr_active) {
+ iwl_mld_retry_emlsr(mld, vif);
+ return;
+ }
+
+ if (!emlsr_allowed && emlsr_active) {
+ iwl_mld_exit_emlsr(mld, vif,
+ IWL_MLD_EMLSR_EXIT_BT_COEX,
iwl_mld_get_primary_link(vif));
return;
}
@@ -1074,3 +1173,69 @@ void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
iwl_mld_int_mlo_scan(mld, vif);
}
+
+static void iwl_mld_ignore_tpt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+ struct iwl_mld_sta *mld_sta;
+ bool *start = (void *)data;
+
+ /* check_tpt_wk is only used when TPT block isn't set */
+ if (mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT ||
+ !IWL_MLD_AUTO_EML_ENABLE || !mld_vif->ap_sta)
+ return;
+
+ mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+	/* We only count for the AP sta in an MLO connection */
+ if (!mld_sta->mpdu_counters)
+ return;
+
+ if (*start) {
+ wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk);
+ IWL_DEBUG_EHT(mld, "TPT check disabled\n");
+ return;
+ }
+
+ /* Clear the counters so we start from the beginning */
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
+ struct iwl_mld_per_q_mpdu_counter *queue_counter =
+ &mld_sta->mpdu_counters[q];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+
+ spin_unlock_bh(&queue_counter->lock);
+ }
+
+ /* Schedule the check in 5 seconds */
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk,
+ round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+ IWL_DEBUG_EHT(mld, "TPT check enabled\n");
+}
+
+void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld)
+{
+ bool start = true;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_ignore_tpt_iter,
+ &start);
+}
+
+void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld)
+{
+ bool start = false;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_ignore_tpt_iter,
+ &start);
+}
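
The same-band exception above reduces to a fixed edge check: two 5 GHz links may run EMLSR together only if the lower channel's upper edge ends at or below 5330 MHz and the higher channel's lower edge starts at or above 5490 MHz. A minimal standalone sketch of that rule, with plain MHz integers standing in for the cfg80211 chandef structures (illustrative only, not driver code):

#include <stdio.h>

/* Sketch of the 5 GHz separation rule in iwl_mld_emlsr_pair_state();
 * all values are in MHz.
 */
static int emlsr_5ghz_separation_ok(unsigned int center_a, unsigned int width_a,
				    unsigned int center_b, unsigned int width_b)
{
	unsigned int low_c = center_a, low_w = width_a;
	unsigned int high_c = center_b, high_w = width_b;

	if (low_c > high_c) {
		low_c = center_b; low_w = width_b;
		high_c = center_a; high_w = width_a;
	}

	/* the lower link's top edge must end by 5330 MHz and the higher
	 * link's bottom edge must start at 5490 MHz or above
	 */
	return (low_c + low_w / 2) <= 5330 && (high_c - high_w / 2) >= 5490;
}

int main(void)
{
	/* mirrors the "Same band allowed (5 GHz separated)" KUnit case */
	printf("%d\n", emlsr_5ghz_separation_ok(5210, 40, 5610, 40)); /* 1 */
	printf("%d\n", emlsr_5ghz_separation_ok(5210, 40, 5290, 40)); /* 0 */
	return 0;
}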
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
index 4fb1fdbe3df9..9afa3d6ea649 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
@@ -37,7 +37,7 @@ static inline bool iwl_mld_vif_has_emlsr_cap(struct ieee80211_vif *vif)
return ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION &&
ieee80211_vif_is_mld(vif) &&
vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP &&
- !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->hw_rf_id);
+ !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->info.hw_rf_id);
}
static inline int
@@ -158,10 +158,16 @@ struct iwl_mld_link_sel_data {
};
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
-bool iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
- struct ieee80211_vif *vif,
- const struct iwl_mld_link_sel_data *a,
- const struct iwl_mld_link_sel_data *b);
+u32 iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *a,
+ struct iwl_mld_link_sel_data *b);
+
+bool iwl_mld_bt_allows_emlsr(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ bool entry_criteria);
#endif
+void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld);
+void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld);
+
#endif /* __iwl_mld_mlo_h__ */
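
The header change also records the new return convention: iwl_mld_emlsr_pair_state() yields a u32 of IWL_MLD_EMLSR_EXIT_* bits rather than a bool, with 0 meaning the link pair may use EMLSR. A toy userspace sketch of consuming such a reason mask (the flag names and values here are illustrative, not the driver's):

#include <stdio.h>

/* Illustrative bit flags mirroring the IWL_MLD_EMLSR_EXIT_* convention */
#define EMLSR_EXIT_EQUAL_BAND 0x1
#define EMLSR_EXIT_CHAN_LOAD  0x2

static const char *describe(unsigned int reasons)
{
	if (!reasons)
		return "EMLSR allowed";
	if (reasons & EMLSR_EXIT_EQUAL_BAND)
		return "blocked: links share a band";
	return "blocked: channel load too low";
}

int main(void)
{
	/* 0 means usable; any set bit names a blocking reason */
	printf("%s\n", describe(0));
	printf("%s\n", describe(EMLSR_EXIT_EQUAL_BAND | EMLSR_EXIT_CHAN_LOAD));
	return 0;
}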
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
index fc18cba8aaa8..c0e62d46aba6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -183,47 +183,6 @@ static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
}
static void
-iwl_mld_handle_stored_beacon_notif(struct iwl_mld *mld,
- struct iwl_rx_packet *pkt)
-{
- unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
- struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
- struct ieee80211_rx_status rx_status = {};
- struct sk_buff *skb;
- u32 size = le32_to_cpu(sb->common.byte_count);
-
- if (size == 0)
- return;
-
- if (pkt_len < struct_size(sb, data, size))
- return;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb) {
- IWL_ERR(mld, "alloc_skb failed\n");
- return;
- }
-
- /* update rx_status according to the notification's metadata */
- rx_status.mactime = le64_to_cpu(sb->common.tsf);
- /* TSF as indicated by the firmware is at INA time */
- rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
- rx_status.device_timestamp = le32_to_cpu(sb->common.system_time);
- rx_status.band =
- iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band));
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel),
- rx_status.band);
-
- /* copy the data */
- skb_put_data(skb, sb->data, size);
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
-
- /* pass it as regular rx to mac80211 */
- ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
-}
-
-static void
iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt)
{
@@ -345,12 +304,15 @@ CMD_VERSIONS(session_prot_notif,
CMD_VERSIONS(missed_beacon_notif,
CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
CMD_VERSIONS(tx_resp_notif,
- CMD_VER_ENTRY(8, iwl_tx_resp))
+ CMD_VER_ENTRY(8, iwl_tx_resp)
+ CMD_VER_ENTRY(9, iwl_tx_resp))
CMD_VERSIONS(compressed_ba_notif,
CMD_VER_ENTRY(5, iwl_compressed_ba_notif)
- CMD_VER_ENTRY(6, iwl_compressed_ba_notif))
+ CMD_VER_ENTRY(6, iwl_compressed_ba_notif)
+ CMD_VER_ENTRY(7, iwl_compressed_ba_notif))
CMD_VERSIONS(tlc_notif,
- CMD_VER_ENTRY(3, iwl_tlc_update_notif))
+ CMD_VER_ENTRY(3, iwl_tlc_update_notif)
+ CMD_VER_ENTRY(4, iwl_tlc_update_notif))
CMD_VERSIONS(mu_mimo_grp_notif,
CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
CMD_VERSIONS(channel_switch_start_notif,
@@ -361,8 +323,6 @@ CMD_VERSIONS(ct_kill_notif,
CMD_VER_ENTRY(2, ct_kill_notif))
CMD_VERSIONS(temp_notif,
CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
-CMD_VERSIONS(stored_beacon_notif,
- CMD_VER_ENTRY(4, iwl_stored_beacon_notif))
CMD_VERSIONS(roc_notif,
CMD_VER_ENTRY(1, iwl_roc_notif))
CMD_VERSIONS(probe_resp_data_notif,
@@ -378,7 +338,8 @@ CMD_VERSIONS(bt_coex_notif,
CMD_VERSIONS(beacon_notification,
CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
CMD_VERSIONS(emlsr_mode_notif,
- CMD_VER_ENTRY(1, iwl_esr_mode_notif))
+ CMD_VER_ENTRY(1, iwl_esr_mode_notif_v1)
+ CMD_VER_ENTRY(2, iwl_esr_mode_notif))
CMD_VERSIONS(emlsr_trans_fail_notif,
CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
CMD_VERSIONS(uapsd_misbehaving_ap_notif,
@@ -473,8 +434,6 @@ const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif)
RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
mu_mimo_grp_notif, RX_HANDLER_SYNC)
- RX_HANDLER_NO_OBJECT(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
- stored_beacon_notif, RX_HANDLER_SYNC)
RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
probe_resp_data_notif)
RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
@@ -646,7 +605,7 @@ void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (unlikely(queue >= mld->trans->num_rx_queues))
+ if (unlikely(queue >= mld->trans->info.num_rxqs))
return;
if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
@@ -707,10 +666,14 @@ void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk)
}
}
-void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld)
+void iwl_mld_cancel_async_notifications(struct iwl_mld *mld)
{
struct iwl_async_handler_entry *entry, *tmp;
+ lockdep_assert_wiphy(mld->wiphy);
+
+ wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
+
spin_lock_bh(&mld->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.h b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
index 2eaa1d4e138e..adcdd9dec192 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
@@ -15,7 +15,7 @@ void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk);
-void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld);
+void iwl_mld_cancel_async_notifications(struct iwl_mld *mld);
enum iwl_mld_object_type {
IWL_MLD_OBJECT_TYPE_NONE,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.c b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
index 2fbc8090088b..d5a32ee56b92 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/phy.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
@@ -50,6 +50,9 @@ static void iwl_mld_chanctx_usage_iter(void *_data, u8 *mac,
if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx)
continue;
+ if (vif->type == NL80211_IFTYPE_AP && link_conf->ftm_responder)
+ data->use_def = true;
+
if (iwl_mld_chanctx_fils_enabled(vif, data->ctx))
data->use_def = true;
}
@@ -153,3 +156,43 @@ int iwl_mld_phy_fw_action(struct iwl_mld *mld,
return ret;
}
+
+static u32 iwl_mld_get_phy_config(struct iwl_mld *mld)
+{
+ u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
+ FW_PHY_CFG_RX_CHAIN);
+ u32 valid_rx_ant = iwl_mld_get_valid_rx_ant(mld);
+ u32 valid_tx_ant = iwl_mld_get_valid_tx_ant(mld);
+
+ phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
+ valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
+
+ return mld->fw->phy_config & phy_config;
+}
+
+int iwl_mld_send_phy_cfg_cmd(struct iwl_mld *mld)
+{
+ const struct iwl_tlv_calib_ctrl *default_calib =
+ &mld->fw->default_calib[IWL_UCODE_REGULAR];
+ struct iwl_phy_cfg_cmd_v3 cmd = {
+ .phy_cfg = cpu_to_le32(iwl_mld_get_phy_config(mld)),
+ .calib_control.event_trigger = default_calib->event_trigger,
+ .calib_control.flow_trigger = default_calib->flow_trigger,
+ .phy_specific_cfg = mld->fwrt.phy_filters,
+ };
+
+ IWL_INFO(mld, "Sending Phy CFG command: 0x%x\n", cmd.phy_cfg);
+
+ return iwl_mld_send_cmd_pdu(mld, PHY_CONFIGURATION_CMD, &cmd);
+}
+
+void iwl_mld_update_phy_chandef(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ struct cfg80211_chan_def *chandef =
+ iwl_mld_get_chandef_from_chanctx(mld, ctx);
+
+ phy->chandef = *chandef;
+ iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY);
+}
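
The new iwl_mld_get_phy_config() keeps every bit of the firmware's phy_config except the TX/RX chain fields, which it rewrites with the currently valid antenna masks. A small standalone model of that masking (field positions and widths here are examples, not the real firmware layout):

#include <stdio.h>

/* Illustrative stand-ins for FW_PHY_CFG_{TX,RX}_CHAIN{,_POS} */
#define TX_CHAIN_POS 16
#define TX_CHAIN_MSK (0xf << TX_CHAIN_POS)
#define RX_CHAIN_POS 20
#define RX_CHAIN_MSK (0xf << RX_CHAIN_POS)

/* Keep all firmware phy_config bits except the chain fields, then
 * substitute the valid TX/RX antenna masks, the same shape as
 * iwl_mld_get_phy_config().
 */
static unsigned int build_phy_config(unsigned int fw_phy_config,
				     unsigned int valid_tx,
				     unsigned int valid_rx)
{
	unsigned int keep = ~(TX_CHAIN_MSK | RX_CHAIN_MSK);

	keep |= (valid_tx << TX_CHAIN_POS) | (valid_rx << RX_CHAIN_POS);
	return fw_phy_config & keep;
}

int main(void)
{
	/* firmware advertises all chains, hardware only has A+B (0x3) */
	printf("0x%x\n", build_phy_config(0xffffffff, 0x3, 0x3));
	return 0;
}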
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.h b/drivers/net/wireless/intel/iwlwifi/mld/phy.h
index 2212a89321b7..0deaf179f07c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.h
@@ -52,4 +52,9 @@ iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld,
struct ieee80211_chanctx_conf *ctx);
u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef);
+int iwl_mld_send_phy_cfg_cmd(struct iwl_mld *mld);
+
+void iwl_mld_update_phy_chandef(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx);
+
#endif /* __iwl_mld_phy_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.c b/drivers/net/wireless/intel/iwlwifi/mld/power.c
index 2f16c174b57e..8cc276041360 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/power.c
@@ -253,6 +253,9 @@ static void iwl_mld_power_build_cmd(struct iwl_mld *mld,
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+ if (iwl_fw_lookup_cmd_ver(mld->fw, MAC_PM_POWER_TABLE, 0) >= 2)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ENABLE_SMPS_MSK);
+
/* firmware supports LPRX for beacons at rate 1 Mbps or 6 Mbps only */
if (link_conf->beacon_rate &&
(link_conf->beacon_rate->bitrate == 10 ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
index d5c3f853d96c..5ee38fc168c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
@@ -21,7 +21,7 @@
static int iwl_mld_get_systime(struct iwl_mld *mld, u32 *gp2)
{
- *gp2 = iwl_read_prph(mld->trans, mld->trans->cfg->gp2_reg_addr);
+ *gp2 = iwl_read_prph(mld->trans, mld->trans->mac_cfg->base->gp2_reg_addr);
if (*gp2 == 0x5a5a5a5a)
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
index a75af8c1e8ab..326c300470ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -63,9 +63,9 @@ void iwl_mld_get_bios_tables(struct iwl_mld *mld)
/* we don't fail if the table is not available */
}
- ret = iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
- if (ret)
- IWL_DEBUG_RADIO(mld, "failed to read UATS table (%d)\n", ret);
+ iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
+
+ iwl_bios_get_phy_filters(&mld->fwrt);
}
static int iwl_mld_geo_sar_init(struct iwl_mld *mld)
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
index b87faca23ceb..e85f45bce79a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/roc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
@@ -31,13 +31,54 @@ iwl_mld_vif_iter_emlsr_block_roc(void *data, u8 *mac, struct ieee80211_vif *vif)
*result = ret;
}
+struct iwl_mld_roc_iter_data {
+ enum iwl_roc_activity activity;
+ struct ieee80211_vif *vif;
+ bool found;
+};
+
+static void iwl_mld_find_roc_vif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_roc_iter_data *roc_data = data;
+
+ if (mld_vif->roc_activity != roc_data->activity)
+ return;
+
+ /* The FW supports one ROC of each type simultaneously */
+ if (WARN_ON(roc_data->found)) {
+ roc_data->vif = NULL;
+ return;
+ }
+
+ roc_data->found = true;
+ roc_data->vif = vif;
+}
+
+static struct ieee80211_vif *
+iwl_mld_find_roc_vif(struct iwl_mld *mld, enum iwl_roc_activity activity)
+{
+ struct iwl_mld_roc_iter_data roc_data = {
+ .activity = activity,
+ .found = false,
+ };
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_find_roc_vif_iter,
+ &roc_data);
+
+ return roc_data.vif;
+}
+
int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel *channel, int duration,
enum ieee80211_roc_type type)
{
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- struct iwl_mld_int_sta *aux_sta;
+ struct iwl_mld_int_sta *aux_sta = &mld_vif->aux_sta;
struct iwl_roc_req cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
};
@@ -49,38 +90,40 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
lockdep_assert_wiphy(mld->wiphy);
- ieee80211_iterate_active_interfaces_mtx(mld->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mld_vif_iter_emlsr_block_roc,
- &ret);
- if (ret)
- return ret;
-
- /* TODO: task=Hotspot 2.0 */
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ vif->type != NL80211_IFTYPE_STATION) {
IWL_ERR(mld, "NOT SUPPORTED: ROC on vif->type %d\n",
vif->type);
return -EOPNOTSUPP;
}
- switch (type) {
- case IEEE80211_ROC_TYPE_NORMAL:
- activity = ROC_ACTIVITY_P2P_DISC;
- break;
- case IEEE80211_ROC_TYPE_MGMT_TX:
- activity = ROC_ACTIVITY_P2P_NEG;
- break;
- default:
- WARN_ONCE(1, "Got an invalid P2P ROC type\n");
- return -EINVAL;
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ activity = ROC_ACTIVITY_P2P_DISC;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ activity = ROC_ACTIVITY_P2P_NEG;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+ return -EINVAL;
+ }
+ } else {
+ activity = ROC_ACTIVITY_HOTSPOT;
}
- if (WARN_ON(mld_vif->roc_activity != ROC_NUM_ACTIVITIES))
+ /* The FW supports one ROC of each type simultaneously */
+ if (WARN_ON(iwl_mld_find_roc_vif(mld, activity)))
return -EBUSY;
- /* No MLO on P2P device */
- aux_sta = &mld_vif->deflink.aux_sta;
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_block_roc,
+ &ret);
+ if (ret)
+ return ret;
ret = iwl_mld_add_aux_sta(mld, aux_sta);
if (ret)
@@ -91,9 +134,6 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
cmd.channel_info.channel = cpu_to_le32(channel->hw_value);
cmd.channel_info.band = iwl_mld_nl80211_band_to_fw(channel->band);
cmd.channel_info.width = IWL_PHY_CHANNEL_MODE20;
- /* TODO: task=Hotspot 2.0, revisit those parameters when we add an ROC
- * on the BSS vif
- */
cmd.max_delay = cpu_to_le32(AUX_ROC_MAX_DELAY);
cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
@@ -105,6 +145,7 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
return ret;
}
+
mld_vif->roc_activity = activity;
return 0;
@@ -136,9 +177,9 @@ static void iwl_mld_destroy_roc(struct iwl_mld *mld,
* we can flush the Tx on the queues
*/
- iwl_mld_flush_link_sta_txqs(mld, mld_vif->deflink.aux_sta.sta_id);
+ iwl_mld_flush_link_sta_txqs(mld, mld_vif->aux_sta.sta_id);
- iwl_mld_remove_aux_sta(mld, vif, &vif->bss_conf);
+ iwl_mld_remove_aux_sta(mld, vif);
}
int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
@@ -156,8 +197,8 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
lockdep_assert_wiphy(mld->wiphy);
- /* TODO: task=Hotspot 2.0 */
- if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ vif->type != NL80211_IFTYPE_STATION))
return -EOPNOTSUPP;
	/* No ROC activity running; it's probably already done */
@@ -192,10 +233,10 @@ void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
{
const struct iwl_roc_notif *notif = (void *)pkt->data;
u32 activity = le32_to_cpu(notif->activity);
- /* TODO: task=Hotspot 2.0 - roc can run on BSS */
- struct ieee80211_vif *vif = mld->p2p_device_vif;
struct iwl_mld_vif *mld_vif;
+ struct ieee80211_vif *vif;
+ vif = iwl_mld_find_roc_vif(mld, activity);
if (WARN_ON(!vif))
return;
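
iwl_mld_handle_roc_notif() now resolves the vif through iwl_mld_find_roc_vif() instead of assuming mld->p2p_device_vif, matching the notification's activity against every active interface and refusing duplicates, since the firmware runs at most one ROC per activity type. A toy model of the same lookup over a plain array (the struct here is hypothetical):

#include <stddef.h>
#include <stdio.h>

struct vif { int roc_activity; const char *name; };

/* Walk all interfaces, return the one whose ROC activity matches;
 * NULL if none or, unexpectedly, more than one.
 */
static struct vif *find_roc_vif(struct vif *vifs, size_t n, int activity)
{
	struct vif *found = NULL;

	for (size_t i = 0; i < n; i++) {
		if (vifs[i].roc_activity != activity)
			continue;
		if (found)	/* FW supports one ROC of each type */
			return NULL;
		found = &vifs[i];
	}
	return found;
}

int main(void)
{
	struct vif vifs[] = { { 1, "p2p" }, { 2, "sta" } };
	struct vif *v = find_roc_vif(vifs, 2, 2);

	printf("%s\n", v ? v->name : "none");
	return 0;
}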
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
index c4f189bcece2..ce0093d5c638 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -33,17 +33,17 @@ struct iwl_mld_rx_phy_data {
u32 gp2_on_air_rise;
u16 phy_info;
u8 energy_a, energy_b;
- u8 channel;
};
static void
-iwl_mld_fill_phy_data(struct iwl_rx_mpdu_desc *desc,
+iwl_mld_fill_phy_data(struct iwl_mld *mld,
+ struct iwl_rx_mpdu_desc *desc,
struct iwl_mld_rx_phy_data *phy_data)
{
phy_data->phy_info = le16_to_cpu(desc->phy_info);
- phy_data->rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ phy_data->rate_n_flags = iwl_v3_rate_from_v2_v3(desc->v3.rate_n_flags,
+ mld->fw_rates_ver_3);
phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
- phy_data->channel = desc->v3.channel;
phy_data->energy_a = desc->v3.energy_a;
phy_data->energy_b = desc->v3.energy_b;
phy_data->data0 = desc->v3.phy_data0;
@@ -1162,8 +1162,6 @@ static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld,
static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
struct iwl_mld_rx_phy_data *phy_data,
- struct iwl_rx_mpdu_desc *mpdu_desc,
- struct ieee80211_hdr *hdr,
int queue)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
@@ -1172,47 +1170,15 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK;
- if (WARN_ON_ONCE(phy_data->with_data && (!mpdu_desc || !hdr)))
- return;
-
- /* Keep packets with CRC errors (and with overrun) for monitor mode
- * (otherwise the firmware discards them) but mark them as bad.
- */
- if (phy_data->with_data &&
- (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
- !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK)))) {
- IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(mpdu_desc->status));
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- }
-
phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
- if (phy_data->with_data &&
- likely(!(phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
- rx_status->mactime =
- le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise);
-
- /* TSF as indicated by the firmware is at INA time */
- rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
- } else {
+ if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
phy_data->info_type =
le32_get_bits(phy_data->data1,
IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
- }
-
- /* management stuff on default queue */
- if (!queue && phy_data->with_data &&
- unlikely(ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))) {
- rx_status->boottime_ns = ktime_get_boottime_ns();
-
- if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
- mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_FOUND;
- }
/* set the preamble flag if appropriate */
- if (format == RATE_MCS_CCK_MSK &&
+ if (format == RATE_MCS_MOD_TYPE_CCK &&
phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
@@ -1237,7 +1203,7 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
}
/* must be before L-SIG data */
- if (format == RATE_MCS_HE_MSK)
+ if (format == RATE_MCS_MOD_TYPE_HE)
iwl_mld_rx_he(mld, skb, phy_data, queue);
iwl_mld_decode_lsig(skb, phy_data);
@@ -1245,40 +1211,53 @@ static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
rx_status->device_timestamp = phy_data->gp2_on_air_rise;
/* using TLV format and must be after all fixed len fields */
- if (format == RATE_MCS_EHT_MSK)
+ if (format == RATE_MCS_MOD_TYPE_EHT)
iwl_mld_rx_eht(mld, skb, phy_data, queue);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (unlikely(mld->monitor.on))
+ if (unlikely(mld->monitor.on)) {
iwl_mld_add_rtap_sniffer_config(mld, skb);
+
+ if (mld->monitor.ptp_time) {
+ u64 adj_time =
+ iwl_mld_ptp_get_adj_time(mld,
+ phy_data->gp2_on_air_rise *
+ NSEC_PER_USEC);
+
+ rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC);
+ rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64;
+ rx_status->flag &= ~RX_FLAG_MACTIME;
+ }
+ }
#endif
- if (format != RATE_MCS_CCK_MSK && is_sgi)
+ if (format != RATE_MCS_MOD_TYPE_CCK && is_sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
switch (format) {
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
break;
- case RATE_MCS_VHT_MSK:
- case RATE_MCS_HE_MSK:
- case RATE_MCS_EHT_MSK:
- if (format == RATE_MCS_VHT_MSK) {
+ case RATE_MCS_MOD_TYPE_VHT:
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
+ if (format == RATE_MCS_MOD_TYPE_VHT) {
rx_status->encoding = RX_ENC_VHT;
- } else if (format == RATE_MCS_HE_MSK) {
+ } else if (format == RATE_MCS_MOD_TYPE_HE) {
rx_status->encoding = RX_ENC_HE;
rx_status->he_dcm =
!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
- } else if (format == RATE_MCS_EHT_MSK) {
+ } else if (format == RATE_MCS_MOD_TYPE_EHT) {
rx_status->encoding = RX_ENC_EHT;
}
- rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+ rx_status->nss = u32_get_bits(rate_n_flags,
+ RATE_MCS_NSS_MSK) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
break;
@@ -1735,15 +1714,11 @@ static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld,
}
static void
-iwl_mld_fill_rx_status_band_freq(struct iwl_mld_rx_phy_data *phy_data,
- struct iwl_rx_mpdu_desc *mpdu_desc,
- struct ieee80211_rx_status *rx_status)
+iwl_mld_fill_rx_status_band_freq(struct ieee80211_rx_status *rx_status,
+ u8 band, u8 channel)
{
- enum nl80211_band band;
-
- band = BAND_IN_RX_STATUS(mpdu_desc->mac_phy_idx);
rx_status->band = iwl_mld_phy_band_to_nl80211(band);
- rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
}
@@ -1758,7 +1733,7 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
struct sk_buff *skb;
size_t mpdu_desc_size = sizeof(*mpdu_desc);
bool drop = false;
- u8 crypto_len = 0;
+ u8 crypto_len = 0, band;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
u32 mpdu_len;
enum iwl_mld_reorder_result reorder_res;
@@ -1788,7 +1763,7 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
hdr = (void *)(pkt->data + mpdu_desc_size);
- iwl_mld_fill_phy_data(mpdu_desc, &phy_data);
+ iwl_mld_fill_phy_data(mld, mpdu_desc, &phy_data);
if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
/* If the device inserted padding it means that (it thought)
@@ -1802,7 +1777,11 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
/* this is needed early */
- iwl_mld_fill_rx_status_band_freq(&phy_data, mpdu_desc, rx_status);
+ band = u8_get_bits(mpdu_desc->mac_phy_band,
+ IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK);
+ iwl_mld_fill_rx_status_band_freq(rx_status, band,
+ mpdu_desc->v3.channel);
+
rcu_read_lock();
@@ -1814,7 +1793,36 @@ void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU))
iwl_mld_rx_update_ampdu_ref(mld, &phy_data, rx_status);
- iwl_mld_rx_fill_status(mld, skb, &phy_data, mpdu_desc, hdr, queue);
+ /* Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
+ if (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+ !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
+ IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(mpdu_desc->status));
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+
+ if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ rx_status->mactime =
+ le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise);
+
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ }
+
+ /* management stuff on default queue */
+ if (!queue && unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))) {
+ rx_status->boottime_ns = ktime_get_boottime_ns();
+
+ if (mld->scan.pass_all_sched_res ==
+ SCHED_SCAN_PASS_ALL_STATE_ENABLED)
+ mld->scan.pass_all_sched_res =
+ SCHED_SCAN_PASS_ALL_STATE_FOUND;
+ }
+
+ iwl_mld_rx_fill_status(mld, skb, &phy_data, queue);
if (iwl_mld_rx_crypto(mld, sta, hdr, rx_status, mpdu_desc, queue,
le32_to_cpu(pkt->len_n_flags), &crypto_len))
@@ -1857,7 +1865,7 @@ void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
enum iwl_mld_internal_rxq_notif_type type,
const void *notif_payload, u32 notif_payload_size)
{
- u8 num_rx_queues = mld->trans->num_rx_queues;
+ u8 num_rx_queues = mld->trans->info.num_rxqs;
struct {
struct iwl_rxq_sync_cmd sync_cmd;
struct iwl_mld_internal_rxq_notif notif;
@@ -1956,6 +1964,7 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct sk_buff *skb;
u32 format, rssi;
+ u8 channel;
if (unlikely(mld->fw_status.in_hw_restart))
return;
@@ -1968,14 +1977,16 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
desc = (void *)pkt->data;
rssi = le32_to_cpu(desc->rssi);
+ channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
+
phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
- phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
phy_data.data0 = desc->phy_info[0];
phy_data.data1 = desc->phy_info[1];
phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
- phy_data.rate_n_flags = le32_to_cpu(desc->rate);
+ phy_data.rate_n_flags = iwl_v3_rate_from_v2_v3(desc->rate,
+ mld->fw_rates_ver_3);
phy_data.with_data = false;
BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec));
@@ -2018,13 +2029,13 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
break;
}
- rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(phy_data.channel,
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
- iwl_mld_rx_fill_status(mld, skb, &phy_data, NULL, NULL, queue);
+ iwl_mld_rx_fill_status(mld, skb, &phy_data, queue);
/* No more radiotap info should be added after this point.
* Mark it as mac header for upper layers to know where
@@ -2037,17 +2048,17 @@ void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
* may be up to 8 spatial streams.
*/
switch (format) {
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
break;
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
rx_status->nss =
le32_get_bits(desc->rx_vec[2],
RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
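
In the sniffer path above, when PTP time is enabled the GP2 on-air-rise timestamp (microseconds) is scaled to nanoseconds, adjusted through the PTP clock, and divided back to microseconds for the 64-bit radiotap timestamp field. A sketch of that round trip, with a stand-in for iwl_mld_ptp_get_adj_time() whose fixed offset is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* Pretend the PTP clock runs a fixed offset ahead of GP2 (illustrative) */
static uint64_t ptp_get_adj_time(uint64_t gp2_ns)
{
	return gp2_ns + 123456789ULL;
}

int main(void)
{
	uint32_t gp2_on_air_rise = 1000000;	/* GP2 timestamp, usec */

	/* usec -> ns, adjust, back to usec for rx_status->mactime */
	uint64_t adj_ns = ptp_get_adj_time((uint64_t)gp2_on_air_rise *
					   NSEC_PER_USEC);
	uint64_t mactime = adj_ns / NSEC_PER_USEC;

	printf("%llu\n", (unsigned long long)mactime);
	return 0;
}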
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
index 7ec04318ec2f..3fce7cd2d512 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -1920,6 +1920,9 @@ void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
IWL_DEBUG_SCAN(mld, "Scan link is no longer valid\n");
ieee80211_scan_completed(mld->hw, &info);
+
+	/* Scan is over, we can check the tpt counters again */
+ iwl_mld_stop_ignoring_tpt_updates(mld);
} else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mld->hw);
mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
index 332a7aecec2d..8fb51209b4a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -401,9 +401,11 @@ static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
const struct iwl_sta_cfg_cmd *cmd)
{
- int ret = iwl_mld_send_cmd_pdu(mld,
- WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
- cmd);
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
+ int cmd_len = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) > 1 ?
+ sizeof(*cmd) :
+ sizeof(struct iwl_sta_cfg_cmd_v1);
+ int ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, cmd_len);
if (ret)
IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
return ret;
@@ -660,7 +662,7 @@ iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
if (mld->fw_status.in_hw_restart)
return 0;
- dup_data = kcalloc(mld->trans->num_rx_queues, sizeof(*dup_data),
+ dup_data = kcalloc(mld->trans->info.num_rxqs, sizeof(*dup_data),
GFP_KERNEL);
if (!dup_data)
return -ENOMEM;
@@ -673,7 +675,7 @@ iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
* This thus allows receiving a packet with seqno 0 and the
* retry bit set as the very first packet on a new TID.
*/
- for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++)
memset(dup_data[q].last_seq, 0xff,
sizeof(dup_data[q].last_seq));
mld_sta->dup_data = dup_data;
@@ -695,13 +697,13 @@ static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld,
sta->tdls || !ieee80211_vif_is_mld(vif))
return;
- mld_sta->mpdu_counters = kcalloc(mld->trans->num_rx_queues,
+ mld_sta->mpdu_counters = kcalloc(mld->trans->info.num_rxqs,
sizeof(*mld_sta->mpdu_counters),
GFP_KERNEL);
if (!mld_sta->mpdu_counters)
return;
- for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ for (int q = 0; q < mld->trans->info.num_rxqs; q++)
spin_lock_init(&mld_sta->mpdu_counters[q].lock);
}
@@ -947,7 +949,7 @@ static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld,
int queue, size;
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
- mld->trans->cfg->min_txq_size);
+ mld->trans->mac_cfg->base->min_txq_size);
queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size,
IWL_WATCHDOG_DISABLED);
@@ -1087,6 +1089,24 @@ int iwl_mld_add_aux_sta(struct iwl_mld *mld,
0, NULL, IWL_MAX_TID_COUNT);
}
+int iwl_mld_add_mon_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_MONITOR))
+ return -EINVAL;
+
+ return iwl_mld_add_internal_sta(mld, &mld_link->mon_sta,
+ STATION_TYPE_BCAST_MGMT,
+ mld_link->fw_id, NULL,
+ IWL_MAX_TID_COUNT);
+}
+
static void iwl_mld_remove_internal_sta(struct iwl_mld *mld,
struct iwl_mld_int_sta *internal_sta,
bool flush, u8 tid)
@@ -1140,6 +1160,19 @@ void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
}
void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ vif->type != NL80211_IFTYPE_STATION))
+ return;
+
+ iwl_mld_remove_internal_sta(mld, &mld_vif->aux_sta, false,
+ IWL_MAX_TID_COUNT);
+}
+
+void iwl_mld_remove_mon_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link)
{
@@ -1148,11 +1181,10 @@ void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
if (WARN_ON(!mld_link))
return;
- /* TODO: Hotspot 2.0 */
- if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ if (WARN_ON(vif->type != NL80211_IFTYPE_MONITOR))
return;
- iwl_mld_remove_internal_sta(mld, &mld_link->aux_sta, false,
+ iwl_mld_remove_internal_sta(mld, &mld_link->mon_sta, false,
IWL_MAX_TID_COUNT);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.h b/drivers/net/wireless/intel/iwlwifi/mld/sta.h
index ddcffd7b9fde..1897b121aae2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.h
@@ -247,6 +247,10 @@ int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
int iwl_mld_add_aux_sta(struct iwl_mld *mld,
struct iwl_mld_int_sta *internal_sta);
+int iwl_mld_add_mon_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
@@ -256,6 +260,9 @@ void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
struct ieee80211_bss_conf *link);
void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+
+void iwl_mld_remove_mon_sta(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
index 0715bbc31031..f633cb1cf510 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/stats.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
@@ -191,11 +191,12 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
break;
}
- if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) {
+ if (format == RATE_MCS_MOD_TYPE_CCK ||
+ format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) {
int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
/* add the offset needed to get to the legacy ofdm indices */
- if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
rate += IWL_FIRST_OFDM_RATE;
switch (rate) {
@@ -240,7 +241,7 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
- if (format == RATE_MCS_HT_MSK)
+ if (format == RATE_MCS_MOD_TYPE_HT)
rinfo->mcs = RATE_HT_MCS_INDEX(rate_n_flags);
else
rinfo->mcs = u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
@@ -249,10 +250,10 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
switch (format) {
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -293,10 +294,10 @@ static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
rinfo->he_dcm = 1;
break;
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
rinfo->flags |= RATE_INFO_FLAGS_MCS;
break;
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
break;
}
@@ -467,12 +468,18 @@ static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw,
old_load = phy->avg_channel_load_not_by_us;
new_load = le32_to_cpu(per_phy[phy->fw_id].channel_load_not_by_us);
- if (IWL_FW_CHECK(phy->mld, new_load > 100, "Invalid channel load %u\n",
- new_load))
+
+ if (IWL_FW_CHECK(phy->mld,
+ new_load != IWL_STATS_UNKNOWN_CHANNEL_LOAD &&
+ new_load > 100,
+ "Invalid channel load %u\n", new_load))
return;
- /* give a weight of 0.5 for the old value */
- phy->avg_channel_load_not_by_us = (new_load >> 1) + (old_load >> 1);
+ if (new_load != IWL_STATS_UNKNOWN_CHANNEL_LOAD) {
+ /* update giving a weight of 0.5 for the old value */
+ phy->avg_channel_load_not_by_us = (new_load >> 1) +
+ (old_load >> 1);
+ }
iwl_mld_emlsr_check_chan_load(hw, phy, old_load);
}
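
The channel-load update above is an exponential moving average with alpha = 0.5, done with shifts, and is now skipped when the firmware reports the IWL_STATS_UNKNOWN_CHANNEL_LOAD sentinel instead of a real sample. A worked example of how the shifted average converges:

#include <stdio.h>

/* avg = new/2 + old/2, the same shape as the stats.c update */
static unsigned int avg_load(unsigned int old_avg, unsigned int new_load)
{
	return (new_load >> 1) + (old_avg >> 1);
}

int main(void)
{
	unsigned int avg = 0;
	unsigned int samples[] = { 40, 40, 40, 40 };

	for (int i = 0; i < 4; i++) {
		avg = avg_load(avg, samples[i]);
		printf("sample %d -> avg %u\n", i, avg);
	}
	/* converges toward 40: 20, 30, 35, 37 */
	return 0;
}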
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
index 36317feb923b..3e2ae6020613 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o
+iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o emlsr_with_bt.o
ccflags-y += -I$(src)/../
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmld-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
index 1fd664be1a7c..29b0248cec3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for channel helper functions
*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <kunit/test.h>
#include <kunit/static_stub.h>
@@ -474,14 +474,14 @@ static struct iwl_rx_mpdu_desc *setup_mpdu_desc(void)
KUNIT_ALLOC_AND_ASSERT(test, mpdu_desc);
mpdu_desc->reorder_data |=
- cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_BAID_MASK,
- param->rx_pkt.baid));
+ le32_encode_bits(param->rx_pkt.baid,
+ IWL_RX_MPDU_REORDER_BAID_MASK);
mpdu_desc->reorder_data |=
- cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_SN_MASK,
- param->rx_pkt.sn));
+ le32_encode_bits(param->rx_pkt.sn,
+ IWL_RX_MPDU_REORDER_SN_MASK);
mpdu_desc->reorder_data |=
- cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_NSSN_MASK,
- param->rx_pkt.nssn));
+ le32_encode_bits(param->rx_pkt.nssn,
+ IWL_RX_MPDU_REORDER_NSSN_MASK);
if (param->rx_pkt.old_sn)
mpdu_desc->reorder_data |=
cpu_to_le32(IWL_RX_MPDU_REORDER_BA_OLD_SN);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c
new file mode 100644
index 000000000000..91556ee5c142
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for EMLSR operation with BT coexistence
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "mlo.h"
+
+static const struct emlsr_with_bt_test_case {
+ const char *desc;
+ struct {
+ struct iwl_bt_coex_profile_notif notif;
+ s32 signal;
+ bool check_entry;
+ } input;
+ bool emlsr_allowed;
+} emlsr_with_bt_cases[] = {
+ {
+ .desc = "BT penalty(exit) with low rssi 4.5: emlsr allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {4, 5},
+ .notif.wifi_loss_mid_high_rssi[1] = {7, 9},
+ .signal = -69,
+ .check_entry = false,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(exit) from high rssi 5: emlsr allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {7, 9},
+ .notif.wifi_loss_mid_high_rssi[1] = {5, 5},
+ .signal = -68,
+ .check_entry = false,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(exit) with low rssi 8: emlsr not allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {7, 9},
+ .notif.wifi_loss_mid_high_rssi[1] = {4, 5},
+ .signal = -69,
+ .check_entry = false,
+ },
+ .emlsr_allowed = false,
+ },
+ {
+ .desc = "BT penalty(exit) from high rssi 9: emlsr not allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {4, 5},
+ .notif.wifi_loss_mid_high_rssi[1] = {9, 9},
+ .signal = -68,
+ .check_entry = false,
+ },
+ .emlsr_allowed = false,
+ },
+ {
+ .desc = "BT penalty(entry) with low rssi 4.5: emlsr allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {4, 5},
+ .notif.wifi_loss_mid_high_rssi[1] = {7, 9},
+ .signal = -63,
+ .check_entry = true,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(entry) from high rssi 5: emlsr allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {7, 9},
+ .notif.wifi_loss_mid_high_rssi[1] = {5, 5},
+ .signal = -62,
+ .check_entry = false,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(entry) with low rssi 8: emlsr not allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {7, 9},
+ .notif.wifi_loss_mid_high_rssi[1] = {4, 5},
+ .signal = -63,
+ .check_entry = false,
+ },
+ .emlsr_allowed = true,
+ },
+ {
+ .desc = "BT penalty(entry) from high rssi 9: emlsr not allowed",
+ .input = {
+ .notif.wifi_loss_low_rssi[1] = {4, 5},
+ .notif.wifi_loss_mid_high_rssi[1] = {9, 9},
+ .signal = -62,
+ .check_entry = true,
+ },
+ .emlsr_allowed = false,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(emlsr_with_bt, emlsr_with_bt_cases, desc);
+
+static void test_emlsr_with_bt(struct kunit *test)
+{
+ struct iwl_mld *mld = test->priv;
+ const struct emlsr_with_bt_test_case *test_param =
+ (const void *)(test->param_value);
+ struct ieee80211_vif *vif =
+ iwlmld_kunit_add_vif(true, NL80211_IFTYPE_STATION);
+ struct ieee80211_bss_conf *link = iwlmld_kunit_add_link(vif, 1);
+ bool actual_value = false;
+
+ KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+
+ /* Extract test case parameters */
+ link->bss->signal = DBM_TO_MBM(test_param->input.signal);
+ memcpy(&mld->last_bt_notif, &test_param->input.notif,
+ sizeof(struct iwl_bt_coex_profile_notif));
+
+ actual_value = iwl_mld_bt_allows_emlsr(mld, link,
+ test_param->input.check_entry);
+ /* Assert that the returned value matches the expected emlsr_allowed */
+ KUNIT_EXPECT_EQ(test, actual_value, test_param->emlsr_allowed);
+}
+
+static struct kunit_case emlsr_with_bt_test_cases[] = {
+ KUNIT_CASE_PARAM(test_emlsr_with_bt, emlsr_with_bt_gen_params),
+ {},
+};
+
+static struct kunit_suite emlsr_with_bt = {
+ .name = "iwlmld-emlsr-with-bt-tests",
+ .test_cases = emlsr_with_bt_test_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(emlsr_with_bt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
index 4e189bf8b3fb..0e3b9417dd63 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for channel helper functions
*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <kunit/test.h>
@@ -30,10 +30,10 @@ static void test_hcmd_names_sorted(struct kunit *test)
static void test_hcmd_names_for_rx(struct kunit *test)
{
static struct iwl_trans t = {
- .command_groups = iwl_mld_groups,
+ .conf.command_groups = iwl_mld_groups,
};
- t.command_groups_size = global_iwl_mld_goups_size;
+ t.conf.command_groups_size = global_iwl_mld_goups_size;
for (unsigned int i = 0; i < iwl_mld_rx_handlers_num; i++) {
const struct iwl_rx_handler *rxh;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
index 295dcfd3f85d..94a037bec1fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
@@ -32,7 +32,7 @@ static const struct link_grading_test_case {
.desc = "channel util of 128 (50%)",
.input.link = {
.link_id = 0,
- .chandef = &chandef_2ghz,
+ .chandef = &chandef_2ghz_20mhz,
.active = false,
.has_chan_util_elem = true,
.chan_util = 128,
@@ -43,7 +43,7 @@ static const struct link_grading_test_case {
.desc = "channel util of 180 (70%)",
.input.link = {
.link_id = 0,
- .chandef = &chandef_2ghz,
+ .chandef = &chandef_2ghz_20mhz,
.active = false,
.has_chan_util_elem = true,
.chan_util = 180,
@@ -54,7 +54,7 @@ static const struct link_grading_test_case {
.desc = "channel util of 180 (70%), channel load by us of 10%",
.input.link = {
.link_id = 0,
- .chandef = &chandef_2ghz,
+ .chandef = &chandef_2ghz_20mhz,
.has_chan_util_elem = true,
.chan_util = 180,
.active = true,
@@ -66,7 +66,7 @@ static const struct link_grading_test_case {
.desc = "no channel util element",
.input.link = {
.link_id = 0,
- .chandef = &chandef_2ghz,
+ .chandef = &chandef_2ghz_20mhz,
.active = true,
},
.expected_grade = 120,
@@ -132,7 +132,7 @@ static void test_link_grading(struct kunit *test)
bool active = test_param->input.link.active;
u16 valid_links;
struct iwl_mld_kunit_link assoc_link = {
- .band = test_param->input.link.chandef->chan->band,
+ .chandef = test_param->input.link.chandef,
};
/* If the link is not active, use a different link as the assoc link */
@@ -172,108 +172,150 @@ static struct kunit_suite link_selection = {
kunit_test_suite(link_selection);
-static const struct channel_load_case {
+static const struct link_pair_case {
const char *desc;
+ const struct cfg80211_chan_def *chandef_a, *chandef_b;
bool low_latency_vif;
u32 chan_load_not_by_us;
- enum nl80211_chan_width bw_a;
- enum nl80211_chan_width bw_b;
bool primary_link_active;
- bool expected_result;
-} channel_load_cases[] = {
+ u32 expected_result;
+} link_pair_cases[] = {
{
.desc = "Unequal bandwidth, primary link inactive, EMLSR not allowed",
.low_latency_vif = false,
.primary_link_active = false,
- .bw_a = NL80211_CHAN_WIDTH_40,
- .bw_b = NL80211_CHAN_WIDTH_20,
- .expected_result = false,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_6ghz_20mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
},
{
.desc = "Equal bandwidths, sufficient channel load, EMLSR allowed",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 11,
- .bw_a = NL80211_CHAN_WIDTH_40,
- .bw_b = NL80211_CHAN_WIDTH_40,
- .expected_result = true,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_6ghz_40mhz,
+ .expected_result = 0,
},
{
.desc = "Equal bandwidths, insufficient channel load, EMLSR not allowed",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 6,
- .bw_a = NL80211_CHAN_WIDTH_80,
- .bw_b = NL80211_CHAN_WIDTH_80,
- .expected_result = false,
+ .chandef_a = &chandef_5ghz_80mhz,
+ .chandef_b = &chandef_6ghz_80mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
},
{
.desc = "Low latency VIF, sufficient channel load, EMLSR allowed",
.low_latency_vif = true,
.primary_link_active = true,
.chan_load_not_by_us = 6,
- .bw_a = NL80211_CHAN_WIDTH_160,
- .bw_b = NL80211_CHAN_WIDTH_160,
- .expected_result = true,
+ .chandef_a = &chandef_5ghz_160mhz,
+ .chandef_b = &chandef_6ghz_160mhz,
+ .expected_result = 0,
},
{
.desc = "Different bandwidths (2x ratio), primary link load permits EMLSR",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 30,
- .bw_a = NL80211_CHAN_WIDTH_40,
- .bw_b = NL80211_CHAN_WIDTH_20,
- .expected_result = true,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_6ghz_20mhz,
+ .expected_result = 0,
},
{
.desc = "Different bandwidths (4x ratio), primary link load permits EMLSR",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 45,
- .bw_a = NL80211_CHAN_WIDTH_80,
- .bw_b = NL80211_CHAN_WIDTH_20,
- .expected_result = true,
+ .chandef_a = &chandef_5ghz_80mhz,
+ .chandef_b = &chandef_6ghz_20mhz,
+ .expected_result = 0,
},
{
.desc = "Different bandwidths (16x ratio), primary link load insufficient",
.low_latency_vif = false,
.primary_link_active = true,
.chan_load_not_by_us = 45,
- .bw_a = NL80211_CHAN_WIDTH_320,
- .bw_b = NL80211_CHAN_WIDTH_20,
- .expected_result = false,
+ .chandef_a = &chandef_6ghz_320mhz,
+ .chandef_b = &chandef_5ghz_20mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
+ },
+ {
+ .desc = "Same band not allowed (2.4 GHz)",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .chandef_a = &chandef_2ghz_20mhz,
+ .chandef_b = &chandef_2ghz_11_20mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND,
+ },
+ {
+ .desc = "Same band not allowed (5 GHz)",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_5ghz_40mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND,
+ },
+ {
+ .desc = "Same band allowed (5 GHz separated)",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .chandef_a = &chandef_5ghz_40mhz,
+ .chandef_b = &chandef_5ghz_120_40mhz,
+ .expected_result = 0,
+ },
+ {
+ .desc = "Same band not allowed (6 GHz)",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .chandef_a = &chandef_6ghz_160mhz,
+ .chandef_b = &chandef_6ghz_221_160mhz,
+ .expected_result = IWL_MLD_EMLSR_EXIT_EQUAL_BAND,
},
};
-KUNIT_ARRAY_PARAM_DESC(channel_load, channel_load_cases, desc);
+KUNIT_ARRAY_PARAM_DESC(link_pair, link_pair_cases, desc);
-static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test)
+static void test_iwl_mld_link_pair_allows_emlsr(struct kunit *test)
{
- const struct channel_load_case *params = test->param_value;
+ const struct link_pair_case *params = test->param_value;
struct iwl_mld *mld = test->priv;
struct ieee80211_vif *vif;
- struct cfg80211_chan_def chandef_a, chandef_b;
- struct iwl_mld_link_sel_data a = {.chandef = &chandef_a,
- .link_id = 4};
- struct iwl_mld_link_sel_data b = {.chandef = &chandef_b,
- .link_id = 5};
+ struct ieee80211_bss_conf *link;
+ /* link A is the primary and link B is the secondary */
+ struct iwl_mld_link_sel_data a = {
+ .chandef = params->chandef_a,
+ .link_id = 4,
+ };
+ struct iwl_mld_link_sel_data b = {
+ .chandef = params->chandef_b,
+ .link_id = 5,
+ };
struct iwl_mld_kunit_link assoc_link = {
+ .chandef = params->primary_link_active ? a.chandef : b.chandef,
.id = params->primary_link_active ? a.link_id : b.link_id,
- .bandwidth = params->primary_link_active ? params->bw_a : params->bw_b,
};
- bool result;
+ u32 result;
vif = iwlmld_kunit_setup_mlo_assoc(BIT(a.link_id) | BIT(b.link_id),
&assoc_link);
- chandef_a.width = params->bw_a;
- chandef_b.width = params->bw_b;
-
if (params->low_latency_vif)
iwl_mld_vif_from_mac80211(vif)->low_latency_causes = 1;
wiphy_lock(mld->wiphy);
+ link = wiphy_dereference(mld->wiphy, vif->link_conf[a.link_id]);
+ KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+ link = wiphy_dereference(mld->wiphy, vif->link_conf[b.link_id]);
+ KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+
/* Simulate channel load */
if (params->primary_link_active) {
struct iwl_mld_phy *phy =
@@ -282,22 +324,22 @@ static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test)
phy->avg_channel_load_not_by_us = params->chan_load_not_by_us;
}
- result = iwl_mld_channel_load_allows_emlsr(mld, vif, &a, &b);
+ result = iwl_mld_emlsr_pair_state(vif, &a, &b);
wiphy_unlock(mld->wiphy);
KUNIT_EXPECT_EQ(test, result, params->expected_result);
}
-static struct kunit_case channel_load_criteria_test_cases[] = {
- KUNIT_CASE_PARAM(test_iwl_mld_channel_load_allows_emlsr, channel_load_gen_params),
+static struct kunit_case link_pair_criteria_test_cases[] = {
+ KUNIT_CASE_PARAM(test_iwl_mld_link_pair_allows_emlsr, link_pair_gen_params),
{}
};
-static struct kunit_suite channel_load_criteria_tests = {
- .name = "iwlmld_channel_load_allows_emlsr",
- .test_cases = channel_load_criteria_test_cases,
+static struct kunit_suite link_pair_criteria_tests = {
+ .name = "iwlmld_link_pair_allows_emlsr",
+ .test_cases = link_pair_criteria_test_cases,
.init = iwlmld_kunit_test_init,
};
-kunit_test_suite(channel_load_criteria_tests);
+kunit_test_suite(link_pair_criteria_tests);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
index 4a4eaa134bd3..69a0d67858bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
@@ -63,11 +63,11 @@ static void test_missed_beacon(struct kunit *test)
struct iwl_rx_packet *pkt;
struct iwl_mld_kunit_link link1 = {
.id = 0,
- .band = NL80211_BAND_6GHZ,
+ .chandef = &chandef_6ghz_160mhz,
};
struct iwl_mld_kunit_link link2 = {
.id = 1,
- .band = NL80211_BAND_5GHZ,
+ .chandef = &chandef_5ghz_80mhz,
};
kunit_activate_static_stub(test, ieee80211_connection_loss,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
index 9712ee696509..26cf27be762d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
@@ -24,7 +24,7 @@ int iwlmld_kunit_test_init(struct kunit *test)
{
struct iwl_mld *mld;
struct iwl_trans *trans;
- const struct iwl_cfg *cfg;
+ const struct iwl_rf_cfg *cfg;
struct iwl_fw *fw;
struct ieee80211_hw *hw;
@@ -146,7 +146,7 @@ iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id)
}
struct ieee80211_chanctx_conf *
-iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def)
+iwlmld_kunit_add_chanctx(const struct cfg80211_chan_def *def)
{
struct kunit *test = kunit_get_current_test();
struct iwl_mld *mld = test->priv;
@@ -346,8 +346,7 @@ iwlmld_kunit_setup_assoc(bool mlo, struct iwl_mld_kunit_link *assoc_link)
else
link = &vif->bss_conf;
- chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->band,
- assoc_link->bandwidth);
+ chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->chandef);
wiphy_lock(mld->wiphy);
iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
@@ -428,7 +427,7 @@ struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1,
link = wiphy_dereference(mld->wiphy, vif->link_conf[link2->id]);
KUNIT_EXPECT_NOT_NULL(test, link);
- chan_ctx = iwlmld_kunit_add_chanctx(link2->band, link2->bandwidth);
+ chan_ctx = iwlmld_kunit_add_chanctx(link2->chandef);
iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
wiphy_unlock(mld->wiphy);
@@ -472,3 +471,33 @@ struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
return iwl_mld_phy_from_mac80211(chanctx);
}
+
+static const struct chandef_case {
+ const char *desc;
+ const struct cfg80211_chan_def *chandef;
+} chandef_cases[] = {
+#define CHANDEF(c, ...) { .desc = "chandef " #c " valid", .chandef = &c, },
+ CHANDEF_LIST
+#undef CHANDEF
+};
+
+KUNIT_ARRAY_PARAM_DESC(chandef, chandef_cases, desc);
+
+static void test_iwl_mld_chandef_valid(struct kunit *test)
+{
+ const struct chandef_case *params = test->param_value;
+
+ KUNIT_EXPECT_EQ(test, true, cfg80211_chandef_valid(params->chandef));
+}
+
+static struct kunit_case chandef_test_cases[] = {
+ KUNIT_CASE_PARAM(test_iwl_mld_chandef_valid, chandef_gen_params),
+ {}
+};
+
+static struct kunit_suite chandef_tests = {
+ .name = "iwlmld_valid_test_chandefs",
+ .test_cases = chandef_test_cases,
+};
+
+kunit_test_suite(chandef_tests);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
index d3723653cf1b..edf8eef4e81a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
@@ -14,9 +14,8 @@ struct iwl_mld;
int iwlmld_kunit_test_init(struct kunit *test);
struct iwl_mld_kunit_link {
+ const struct cfg80211_chan_def *chandef;
u8 id;
- enum nl80211_band band;
- enum nl80211_chan_width bandwidth;
};
enum nl80211_iftype;
@@ -42,50 +41,57 @@ static struct ieee80211_channel _name = { \
.hw_value = (_freq), \
}
-#define CHANDEF(_name, _channel, _freq1, _width) \
-__maybe_unused static struct cfg80211_chan_def _name = { \
- .chan = &(_channel), \
- .center_freq1 = (_freq1), \
- .width = (_width), \
-}
-
CHANNEL(chan_2ghz, NL80211_BAND_2GHZ, 2412);
+CHANNEL(chan_2ghz_11, NL80211_BAND_2GHZ, 2462);
CHANNEL(chan_5ghz, NL80211_BAND_5GHZ, 5200);
+CHANNEL(chan_5ghz_120, NL80211_BAND_5GHZ, 5600);
CHANNEL(chan_6ghz, NL80211_BAND_6GHZ, 6115);
+CHANNEL(chan_6ghz_221, NL80211_BAND_6GHZ, 7055);
/* Feel free to add more */
+#undef CHANNEL
+
+#define CHANDEF_LIST \
+ CHANDEF(chandef_2ghz_20mhz, chan_2ghz, 2412, \
+ NL80211_CHAN_WIDTH_20) \
+ CHANDEF(chandef_2ghz_40mhz, chan_2ghz, 2422, \
+ NL80211_CHAN_WIDTH_40) \
+ CHANDEF(chandef_2ghz_11_20mhz, chan_2ghz_11, 2462, \
+ NL80211_CHAN_WIDTH_20) \
+ CHANDEF(chandef_5ghz_20mhz, chan_5ghz, 5200, \
+ NL80211_CHAN_WIDTH_20) \
+ CHANDEF(chandef_5ghz_40mhz, chan_5ghz, 5210, \
+ NL80211_CHAN_WIDTH_40) \
+ CHANDEF(chandef_5ghz_80mhz, chan_5ghz, 5210, \
+ NL80211_CHAN_WIDTH_80) \
+ CHANDEF(chandef_5ghz_160mhz, chan_5ghz, 5250, \
+ NL80211_CHAN_WIDTH_160) \
+ CHANDEF(chandef_5ghz_120_40mhz, chan_5ghz_120, 5610, \
+ NL80211_CHAN_WIDTH_40) \
+ CHANDEF(chandef_6ghz_20mhz, chan_6ghz, 6115, \
+ NL80211_CHAN_WIDTH_20) \
+ CHANDEF(chandef_6ghz_40mhz, chan_6ghz, 6125, \
+ NL80211_CHAN_WIDTH_40) \
+ CHANDEF(chandef_6ghz_80mhz, chan_6ghz, 6145, \
+ NL80211_CHAN_WIDTH_80) \
+ CHANDEF(chandef_6ghz_160mhz, chan_6ghz, 6185, \
+ NL80211_CHAN_WIDTH_160) \
+ CHANDEF(chandef_6ghz_320mhz, chan_6ghz, 6105, \
+ NL80211_CHAN_WIDTH_320) \
+ CHANDEF(chandef_6ghz_221_160mhz, chan_6ghz_221, 6985, \
+ NL80211_CHAN_WIDTH_160) \
+ /* Feel free to add more */
-CHANDEF(chandef_2ghz, chan_2ghz, 2412, NL80211_CHAN_WIDTH_20);
-CHANDEF(chandef_5ghz, chan_5ghz, 5200, NL80211_CHAN_WIDTH_40);
-CHANDEF(chandef_6ghz, chan_6ghz, 6115, NL80211_CHAN_WIDTH_160);
-/* Feel free to add more */
-
-//struct cfg80211_chan_def;
+#define CHANDEF(_name, _channel, _freq1, _width) \
+__maybe_unused static const struct cfg80211_chan_def _name = { \
+ .chan = &(_channel), \
+ .center_freq1 = (_freq1), \
+ .width = (_width), \
+};
+CHANDEF_LIST
+#undef CHANDEF
struct ieee80211_chanctx_conf *
-iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def);
-
-static inline struct ieee80211_chanctx_conf *
-iwlmld_kunit_add_chanctx(enum nl80211_band band, enum nl80211_chan_width width)
-{
- struct cfg80211_chan_def chandef;
-
- switch (band) {
- case NL80211_BAND_2GHZ:
- chandef = chandef_2ghz;
- break;
- case NL80211_BAND_5GHZ:
- chandef = chandef_5ghz;
- break;
- default:
- case NL80211_BAND_6GHZ:
- chandef = chandef_6ghz;
- break;
- }
-
- chandef.width = width;
-
- return iwlmld_kunit_add_chanctx_from_def(&chandef);
-}
+iwlmld_kunit_add_chanctx(const struct cfg80211_chan_def *def);
void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link,
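The utils.h change above replaces individually declared chandefs with an X-macro: CHANDEF_LIST is written once and expanded twice, first to emit the __maybe_unused static const definitions, then again in utils.c to build the chandef_cases[] table for the validity suite. A compile-and-run userspace sketch of that double expansion, with a toy chan_def struct standing in for cfg80211_chan_def:

#include <stddef.h>
#include <stdio.h>

struct chan_def { int center_mhz; int width_mhz; };

/* the list is written once; every expansion site sees the same entries */
#define CHANDEF_LIST \
	CHANDEF(chandef_2ghz_20mhz, 2412, 20) \
	CHANDEF(chandef_5ghz_80mhz, 5210, 80)

/* first expansion: one static definition per entry */
#define CHANDEF(_name, _freq1, _width) \
	static const struct chan_def _name = { _freq1, _width };
CHANDEF_LIST
#undef CHANDEF

/* second expansion: a description/pointer table over the same entries */
static const struct {
	const char *desc;
	const struct chan_def *def;
} cases[] = {
#define CHANDEF(_name, _freq1, _width) { #_name, &_name },
	CHANDEF_LIST
#undef CHANDEF
};

int main(void)
{
	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("%s: %d MHz wide around %d MHz\n", cases[i].desc,
		       cases[i].def->width_mhz, cases[i].def->center_mhz);
	return 0;
}

With the list in one place, a new entry automatically gains both a definition and a row in the validity test table.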
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
index 1909953a9be9..f8a8c35066be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
@@ -13,6 +13,9 @@
#include "mld.h"
#include "hcmd.h"
+#define IWL_MLD_NUM_CTDP_STEPS 20
+#define IWL_MLD_MIN_CTDP_BUDGET_MW 150
+
#define IWL_MLD_CT_KILL_DURATION (5 * HZ)
void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
@@ -116,8 +119,8 @@ free_resp:
static int compare_temps(const void *a, const void *b)
{
- return ((s16)le16_to_cpu(*(__le16 *)a) -
- (s16)le16_to_cpu(*(__le16 *)b));
+ return ((s16)le16_to_cpu(*(const __le16 *)a) -
+ (s16)le16_to_cpu(*(const __le16 *)b));
}
struct iwl_trip_walk_data {
@@ -272,43 +275,27 @@ static void iwl_mld_thermal_zone_register(struct iwl_mld *mld)
}
}
-/* budget in mWatt */
-static const u32 iwl_mld_cdev_budgets[] = {
- 2400, /* cooling state 0 */
- 2000, /* cooling state 1 */
- 1800, /* cooling state 2 */
- 1600, /* cooling state 3 */
- 1400, /* cooling state 4 */
- 1200, /* cooling state 5 */
- 1000, /* cooling state 6 */
- 900, /* cooling state 7 */
- 800, /* cooling state 8 */
- 700, /* cooling state 9 */
- 650, /* cooling state 10 */
- 600, /* cooling state 11 */
- 550, /* cooling state 12 */
- 500, /* cooling state 13 */
- 450, /* cooling state 14 */
- 400, /* cooling state 15 */
- 350, /* cooling state 16 */
- 300, /* cooling state 17 */
- 250, /* cooling state 18 */
- 200, /* cooling state 19 */
- 150, /* cooling state 20 */
-};
-
int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
enum iwl_ctdp_cmd_operation op)
{
struct iwl_ctdp_cmd cmd = {
.operation = cpu_to_le32(op),
- .budget = cpu_to_le32(iwl_mld_cdev_budgets[state]),
.window_size = 0,
};
+ u32 budget;
int ret;
lockdep_assert_wiphy(mld->wiphy);
+ /* Do a linear scale from IWL_MLD_MIN_CTDP_BUDGET_MW to the configured
+ * maximum in the predefined number of steps.
+ */
+ budget = ((mld->power_budget_mw - IWL_MLD_MIN_CTDP_BUDGET_MW) *
+ (IWL_MLD_NUM_CTDP_STEPS - 1 - state)) /
+ (IWL_MLD_NUM_CTDP_STEPS - 1) +
+ IWL_MLD_MIN_CTDP_BUDGET_MW;
+ cmd.budget = cpu_to_le32(budget);
+
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD),
&cmd);
@@ -326,7 +313,7 @@ int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
static int iwl_mld_tcool_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = ARRAY_SIZE(iwl_mld_cdev_budgets) - 1;
+ *state = IWL_MLD_NUM_CTDP_STEPS - 1;
return 0;
}
@@ -354,7 +341,7 @@ static int iwl_mld_tcool_set_cur_state(struct thermal_cooling_device *cdev,
goto unlock;
}
- if (new_state >= ARRAY_SIZE(iwl_mld_cdev_budgets)) {
+ if (new_state >= IWL_MLD_NUM_CTDP_STEPS) {
ret = -EINVAL;
goto unlock;
}
@@ -417,10 +404,50 @@ static void iwl_mld_cooling_device_unregister(struct iwl_mld *mld)
}
#endif /* CONFIG_THERMAL */
+static u32 iwl_mld_ctdp_get_max_budget(struct iwl_mld *mld)
+{
+ u64 bios_power_budget = 0;
+ u32 default_power_budget;
+
+ switch (CSR_HW_RFID_TYPE(mld->trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_GF:
+ /* dual-radio devices have a higher budget */
+ if (CSR_HW_RFID_IS_CDB(mld->trans->info.hw_rf_id))
+ default_power_budget = 5200;
+ else
+ default_power_budget = 2880;
+ break;
+ case IWL_CFG_RF_TYPE_FM:
+ default_power_budget = 3450;
+ break;
+ case IWL_CFG_RF_TYPE_WH:
+ case IWL_CFG_RF_TYPE_PE:
+ default:
+ default_power_budget = 5550;
+ break;
+ }
+
+ iwl_bios_get_pwr_limit(&mld->fwrt, &bios_power_budget);
+
+ /* 32bit in UEFI, 16bit in ACPI; use BIOS value if it is in range */
+ if (bios_power_budget &&
+ bios_power_budget != 0xffff && bios_power_budget != 0xffffffff &&
+ bios_power_budget >= IWL_MLD_MIN_CTDP_BUDGET_MW &&
+ bios_power_budget <= default_power_budget)
+ return (u32)bios_power_budget;
+
+ return default_power_budget;
+}
+
void iwl_mld_thermal_initialize(struct iwl_mld *mld)
{
+ lockdep_assert_not_held(&mld->wiphy->mtx);
+
wiphy_delayed_work_init(&mld->ct_kill_exit_wk, iwl_mld_exit_ctkill);
+ mld->power_budget_mw = iwl_mld_ctdp_get_max_budget(mld);
+ IWL_DEBUG_TEMP(mld, "cTDP power budget: %d mW\n", mld->power_budget_mw);
+
#ifdef CONFIG_THERMAL
iwl_mld_cooling_device_register(mld);
iwl_mld_thermal_zone_register(mld);
@@ -429,7 +456,9 @@ void iwl_mld_thermal_initialize(struct iwl_mld *mld)
void iwl_mld_thermal_exit(struct iwl_mld *mld)
{
+ wiphy_lock(mld->wiphy);
wiphy_delayed_work_cancel(mld->wiphy, &mld->ct_kill_exit_wk);
+ wiphy_unlock(mld->wiphy);
#ifdef CONFIG_THERMAL
iwl_mld_cooling_device_unregister(mld);
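The thermal.c change drops the fixed budget table in favor of a linear scale between IWL_MLD_MIN_CTDP_BUDGET_MW and a per-RF-type (or BIOS-capped) maximum, spread across IWL_MLD_NUM_CTDP_STEPS cooling states. A standalone check of the arithmetic, using the FM default of 3450 mW as an example maximum; state 0 yields the full budget and state 19 the 150 mW floor:

#include <stdio.h>

#define NUM_CTDP_STEPS	20
#define MIN_BUDGET_MW	150

/* same linear scale as the new iwl_mld_config_ctdp() computation */
static unsigned int budget_mw(unsigned int max_mw, unsigned int state)
{
	return (max_mw - MIN_BUDGET_MW) * (NUM_CTDP_STEPS - 1 - state) /
	       (NUM_CTDP_STEPS - 1) + MIN_BUDGET_MW;
}

int main(void)
{
	for (unsigned int state = 0; state < NUM_CTDP_STEPS; state++)
		printf("cooling state %2u -> %4u mW\n",
		       state, budget_mw(3450, state));
	return 0;
}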
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
index f054cc921d9d..a9ca92c0455e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
@@ -44,7 +44,7 @@ iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
u16 flags = 0;
/* STBC flags */
- if (mld->cfg->ht_params->stbc &&
+ if (mld->cfg->ht_params.stbc &&
(hweight8(iwl_mld_get_valid_tx_ant(mld)) > 1)) {
if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
@@ -56,7 +56,7 @@ iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
}
/* LDPC */
- if (mld->cfg->ht_params->ldpc &&
+ if (mld->cfg->ht_params.ldpc &&
((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
(has_vht && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
@@ -658,7 +658,9 @@ void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
if (WARN_ON(!mld_link_sta))
return;
- mld_link_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+ mld_link_sta->last_rate_n_flags =
+ iwl_v3_rate_from_v2_v3(notif->rate,
+ mld->fw_rates_ver_3);
rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
mld_link_sta->last_rate_n_flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.c b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
index 543abe72e465..3b4b575aadaa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
@@ -76,14 +76,14 @@ static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
*/
unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
IWL_WATCHDOG_DISABLED :
- mld->trans->trans_cfg->base_params->wd_timeout;
+ mld->trans->mac_cfg->base->wd_timeout;
int queue, size;
lockdep_assert_wiphy(mld->wiphy);
if (tid == IWL_MGMT_TID)
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
- mld->trans->cfg->min_txq_size);
+ mld->trans->mac_cfg->base->min_txq_size);
else
size = iwl_mld_get_queue_size(mld, txq);
@@ -391,9 +391,9 @@ static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
/* Set CCK or OFDM flag */
if (rate_idx <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
+ rate_flags |= RATE_MCS_MOD_TYPE_CCK;
else
- rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
+ rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
/* Legacy rates are indexed:
* 0 - 3 for CCK and 0 - 7 for OFDM
@@ -425,47 +425,40 @@ static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
struct ieee80211_tx_rate *rate = &info->control.rates[0];
u32 result;
- /* we only care about legacy/HT/VHT so far, so we can
- * build in v1 and use iwl_new_rate_from_v1()
- * FIXME: in newer devices we only support the new rates, build
- * the rate_n_flags in the new format here instead of using v1 and
- * converting it.
- */
-
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
u8 mcs = ieee80211_rate_get_vht_mcs(rate);
u8 nss = ieee80211_rate_get_vht_nss(rate);
- result = RATE_MCS_VHT_MSK_V1;
- result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
+ result = RATE_MCS_MOD_TYPE_VHT;
+ result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- result |= RATE_MCS_SGI_MSK_V1;
+ result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_40;
else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_80;
else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
- result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
-
- result = iwl_new_rate_from_v1(result);
+ result |= RATE_MCS_CHAN_WIDTH_160;
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- result = RATE_MCS_HT_MSK_V1;
- result |= u32_encode_bits(rate->idx,
- RATE_HT_MCS_RATE_CODE_MSK_V1 |
- RATE_HT_MCS_NSS_MSK_V1);
+ /* only MCS 0-15 are supported */
+ u8 mcs = rate->idx & 7;
+ u8 nss = rate->idx > 7;
+
+ result = RATE_MCS_MOD_TYPE_HT;
+ result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
+ result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- result |= RATE_MCS_SGI_MSK_V1;
+ result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_40;
if (info->flags & IEEE80211_TX_CTL_LDPC)
- result |= RATE_MCS_LDPC_MSK_V1;
+ result |= RATE_MCS_LDPC_MSK;
if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
result |= RATE_MCS_STBC_MSK;
-
- result = iwl_new_rate_from_v1(result);
} else {
result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
}
@@ -479,19 +472,23 @@ static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
return result;
}
-static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta, __le16 fc)
+static __le32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
{
+ u32 rate;
+
if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
- return iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
+ rate = iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
+ else
+ rate = iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
+ iwl_mld_get_tx_ant(mld, info, sta, fc);
- return iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
- iwl_mld_get_tx_ant(mld, info, sta, fc);
+ return iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3);
}
static void
-iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd,
+iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd *tx_cmd,
struct sk_buff *skb, bool amsdu)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -537,11 +534,11 @@ iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
NULL;
- struct iwl_tx_cmd_gen3 *tx_cmd;
+ struct iwl_tx_cmd *tx_cmd;
bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
(*ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_A_MSDU_PRESENT);
- u32 rate_n_flags = 0;
+ __le32 rate_n_flags = 0;
u16 flags = 0;
dev_tx_cmd->hdr.cmd = TX_CMD;
@@ -576,7 +573,7 @@ iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
tx_cmd->flags = cpu_to_le16(flags);
- tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ tx_cmd->rate_n_flags = rate_n_flags;
}
/* Caller of this need to check that info->control.vif is not NULL */
@@ -641,16 +638,36 @@ iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
case NL80211_IFTYPE_P2P_DEVICE:
mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
- if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) {
- IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
+ if (mld_vif->roc_activity != ROC_ACTIVITY_P2P_DISC &&
+ mld_vif->roc_activity != ROC_ACTIVITY_P2P_NEG) {
+ IWL_DEBUG_DROP(mld,
+ "Drop tx outside ROC with activity %d\n",
+ mld_vif->roc_activity);
return IWL_MLD_INVALID_DROP_TX;
}
WARN_ON(!ieee80211_is_mgmt(fc));
- return mld_vif->deflink.aux_sta.queue_id;
+ return mld_vif->aux_sta.queue_id;
+ case NL80211_IFTYPE_MONITOR:
+ mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
+ return mld_vif->deflink.mon_sta.queue_id;
+ case NL80211_IFTYPE_STATION:
+ mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
+
+ if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
+ IWL_DEBUG_DROP(mld, "Drop tx not off-channel\n");
+ return IWL_MLD_INVALID_DROP_TX;
+ }
+
+ if (mld_vif->roc_activity != ROC_ACTIVITY_HOTSPOT) {
+ IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
+ return IWL_MLD_INVALID_DROP_TX;
+ }
+
+ WARN_ON(!ieee80211_is_mgmt(fc));
+ return mld_vif->aux_sta.queue_id;
default:
- /* TODO: consider monitor (task=monitor) */
WARN_ONCE(1, "Unsupported vif type\n");
break;
}
@@ -831,7 +848,7 @@ static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
* 1 more for the potential data in the header
*/
if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
- mld->trans->max_skb_frags)
+ mld->trans->info.max_skb_frags)
num_subframes = 1;
if (num_subframes > 1)
@@ -977,11 +994,14 @@ void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
rcu_read_unlock();
}
-static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
+static void iwl_mld_hwrate_to_tx_rate(struct iwl_mld *mld,
+ __le32 rate_n_flags_fw,
struct ieee80211_tx_info *info)
{
enum nl80211_band band = info->band;
struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
+ u32 rate_n_flags = iwl_v3_rate_from_v2_v3(rate_n_flags_fw,
+ mld->fw_rates_ver_3);
u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
@@ -1006,18 +1026,19 @@ static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
}
switch (format) {
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
tx_rate->flags |= IEEE80211_TX_RC_MCS;
tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
break;
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
ieee80211_rate_set_vht(tx_rate,
rate_n_flags & RATE_MCS_CODE_MSK,
- FIELD_GET(RATE_MCS_NSS_MSK,
- rate_n_flags) + 1);
+ u32_get_bits(rate_n_flags,
+ RATE_MCS_NSS_MSK) + 1);
tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
/* mac80211 cannot do this without ieee80211_tx_status_ext()
* but it only matters for radiotap
*/
@@ -1111,8 +1132,7 @@ void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
}
- iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate),
- info);
+ iwl_mld_hwrate_to_tx_rate(mld, tx_resp->initial_rate, info);
if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
ieee80211_tx_status_skb(mld->hw, skb);
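In the HT injection path above, the mac80211 rate index 0-15 is split into a per-stream MCS code and a 0-based stream count: the low three bits select the code, and any index above 7 implies a second spatial stream, so rate->idx & 7 and rate->idx > 7 suffice. A quick standalone check of that decomposition (the packing into RATE_MCS_CODE_MSK and RATE_MCS_NSS_MSK is left out):

#include <stdio.h>

int main(void)
{
	for (int idx = 0; idx <= 15; idx++) {
		int mcs = idx & 7;	/* per-stream MCS code, 0..7 */
		int nss = idx > 7;	/* 0-based: 0 or 1 extra stream */

		printf("HT MCS %2d -> code %d, %d stream(s)\n",
		       idx, mcs, nss + 1);
	}
	return 0;
}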
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 21641d41a958..13cdc077d8d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -181,7 +181,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
u32 value;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
@@ -569,7 +569,7 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
rcu_read_unlock();
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 3e8b7168af01..507c03198c92 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -300,7 +300,7 @@ static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw,
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
- mvm->trans->num_rx_queues);
+ mvm->trans->info.num_rxqs);
aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
((u64)pn[4] << 8) |
((u64)pn[3] << 16) |
@@ -421,7 +421,7 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
- mvm->trans->num_rx_queues);
+ mvm->trans->info.num_rxqs);
rsc[i] = cpu_to_le64((u64)pn[5] |
((u64)pn[4] << 8) |
((u64)pn[3] << 16) |
@@ -1266,7 +1266,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
struct iwl_host_cmd d3_cfg_cmd = {
.id = D3_CONFIG_CMD,
- .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
+ .flags = CMD_WANT_SKB,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
@@ -1370,11 +1370,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* recording before entering D3. In later devices the FW stops the
* recording automatically.
*/
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
-
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
if (ret)
@@ -1686,7 +1684,7 @@ static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm,
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
int i;
- for (i = 1; i < mvm->trans->num_rx_queues; i++)
+ for (i = 1; i < mvm->trans->info.num_rxqs; i++)
memcpy(ptk_pn->q[i].pn[tid],
status->ptk.aes.seq[tid].ccmp.pn,
IEEE80211_CCMP_PN_LEN);
@@ -2825,7 +2823,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
status->qos_seq_ctr[i] + 0x10;
}
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
i = mvm->offload_tid;
iwl_trans_set_q_ptrs(mvm->trans,
mvm_ap_sta->tid_data[i].txq_id,
@@ -3407,9 +3405,9 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
int ret;
enum iwl_d3_status d3_status;
struct iwl_host_cmd cmd = {
- .id = D0I3_END_CMD,
- .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
- };
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB,
+ };
bool reset = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -3427,7 +3425,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
* AX210 and above don't need the command since they have
* the doorbell interrupt.
*/
- if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_22000 &&
+ if (mvm->trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_22000 &&
fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) {
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0)
@@ -3564,9 +3562,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
- /* after the successful handshake, we're out of D3 */
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-
/* when reset is required we can't send these following commands */
if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
goto query_wakeup_reasons;
@@ -3639,9 +3634,6 @@ out:
*/
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
- /* regardless of what happened, we're now out of D3 */
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-
return 1;
}
@@ -3679,8 +3671,7 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
WARN_ON(iwl_mvm_power_update_device(mvm));
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
- ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3,
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, 0,
sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data);
if (ret)
IWL_ERR(mvm,
@@ -3735,7 +3726,6 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
out:
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
mvm->fast_resume = false;
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 671d3f8d79c1..86a87ea89916 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1277,7 +1277,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
return -EIO;
/* supporting only MQ RX */
- if (!mvm->trans->trans_cfg->mq_rx_supported)
+ if (!mvm->trans->mac_cfg->mq_rx_supported)
return -EOPNOTSUPP;
rxb._page = alloc_pages(GFP_ATOMIC, 0);
@@ -1468,7 +1468,7 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
{
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_9000)
return -EOPNOTSUPP;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 2b5a62604fc4..819e3228462a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -29,8 +29,8 @@
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
struct iwl_mvm_alive_data {
+ __le32 sku_id[3];
bool valid;
- u32 scd_base_addr;
};
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
@@ -57,13 +57,13 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
};
- if (mvm->trans->num_rx_queues == 1)
+ if (mvm->trans->info.num_rxqs == 1)
return 0;
/* Do not direct RSS traffic to Q 0 which is our fallback queue */
for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
cmd.indirection_table[i] =
- 1 + (i % (mvm->trans->num_rx_queues - 1));
+ 1 + (i % (mvm->trans->info.num_rxqs - 1));
netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
@@ -114,7 +114,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
u32 i;
- if (version == 6) {
+ if (version >= 6) {
struct iwl_alive_ntf_v6 *palive;
if (pkt_len < sizeof(*palive))
@@ -157,6 +157,17 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
}
}
}
+
+ if (version >= 8) {
+ const struct iwl_alive_ntf *palive_v8 =
+ (void *)pkt->data;
+
+ if (pkt_len < sizeof(*palive_v8))
+ return false;
+
+ IWL_DEBUG_FW(mvm, "platform id: 0x%llx\n",
+ palive_v8->platform_id);
+ }
}
if (version >= 5) {
@@ -171,14 +182,15 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
lmac2 = &palive->lmac_data[1];
status = le16_to_cpu(palive->status);
- mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
- mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
- mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+ BUILD_BUG_ON(sizeof(palive->sku_id.data) !=
+ sizeof(alive_data->sku_id));
+ memcpy(alive_data->sku_id, palive->sku_id.data,
+ sizeof(palive->sku_id.data));
IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
- mvm->trans->sku_id[0],
- mvm->trans->sku_id[1],
- mvm->trans->sku_id[2]);
+ le32_to_cpu(alive_data->sku_id[0]),
+ le32_to_cpu(alive_data->sku_id[1]),
+ le32_to_cpu(alive_data->sku_id[2]));
} else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
struct iwl_alive_ntf_v4 *palive;
@@ -221,7 +233,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
if (umac_error_table) {
if (umac_error_table >=
- mvm->trans->cfg->min_umac_error_event_table) {
+ mvm->trans->mac_cfg->base->min_umac_error_event_table) {
iwl_fw_umac_set_alive_err_table(mvm->trans,
umac_error_table);
} else {
@@ -233,7 +245,6 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
}
}
- alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
alive_data->valid = status == IWL_ALIVE_STATUS_OK;
IWL_DEBUG_FW(mvm,
@@ -282,7 +293,7 @@ static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
struct iwl_trans *trans = mvm->trans;
- enum iwl_device_family device_family = trans->trans_cfg->device_family;
+ enum iwl_device_family device_family = trans->mac_cfg->device_family;
if (device_family < IWL_DEVICE_FAMILY_8000)
return;
@@ -304,7 +315,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
{
struct iwl_notification_wait alive_wait;
struct iwl_mvm_alive_data alive_data = {};
- const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
@@ -317,11 +327,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
- fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
- else
- fw = iwl_get_ucode_image(mvm->fw, ucode_type);
- if (WARN_ON(!fw))
- return -EINVAL;
+ ucode_type = IWL_UCODE_REGULAR_USNIFFER;
iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@@ -334,7 +340,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
* For the unified firmware case, the ucode_type is not
* INIT, but we still need to run it.
*/
- ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
+ ret = iwl_trans_start_fw(mvm->trans, mvm->fw, ucode_type,
+ run_in_rfkill);
if (ret) {
iwl_fw_set_current_image(&mvm->fwrt, old_type);
iwl_remove_notification(&mvm->notif_wait, &alive_wait);
@@ -348,7 +355,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
MVM_UCODE_ALIVE_TIMEOUT);
- if (mvm->trans->trans_cfg->device_family ==
+ if (mvm->trans->mac_cfg->device_family ==
IWL_DEVICE_FAMILY_AX210) {
/* print these registers regardless of alive fail/success */
IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
@@ -365,14 +372,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
struct iwl_trans *trans = mvm->trans;
/* SecBoot info */
- if (trans->trans_cfg->device_family >=
+ if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_22000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_umac_prph(trans,
UMAG_SB_CPU_2_STATUS));
- } else if (trans->trans_cfg->device_family >=
+ } else if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
@@ -383,7 +390,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_mvm_print_pd_notification(mvm);
/* LMAC/UMAC PC info */
- if (trans->trans_cfg->device_family >=
+ if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_22000) {
pc_data = trans->dbg.pc_data;
for (count = 0; count < trans->dbg.num_pc;
@@ -391,7 +398,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
IWL_ERR(mvm, "%s: 0x%x\n",
pc_data->pc_name,
pc_data->pc_address);
- } else if (trans->trans_cfg->device_family >=
+ } else if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_9000) {
IWL_ERR(mvm, "UMAC PC: 0x%x\n",
iwl_read_umac_prph(trans,
@@ -422,10 +429,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
/* if reached this point, Alive notification was received */
iwl_mei_alive_notif(true);
- iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+ iwl_trans_fw_alive(mvm->trans);
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
- &mvm->fw->ucode_capa);
+ &mvm->fw->ucode_capa, alive_data.sku_id);
if (ret) {
IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
iwl_fw_set_current_image(&mvm->fwrt, old_type);
@@ -473,7 +480,7 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
struct iwl_phy_specific_cfg *phy_filters)
{
#ifdef CONFIG_ACPI
- *phy_filters = mvm->phy_filters;
+ *phy_filters = mvm->fwrt.phy_filters;
#endif /* CONFIG_ACPI */
}
@@ -490,7 +497,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
return;
}
@@ -504,11 +511,10 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
return;
}
- ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
- if (ret < 0) {
- IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret);
+ iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
+
+ if (!mvm->fwrt.uats_valid)
return;
- }
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0)
@@ -578,7 +584,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
/* set flags extra PHY configuration flags from the device's cfg */
phy_cfg_cmd.phy_cfg |=
- cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);
+ cpu_to_le32(mvm->trans->mac_cfg->extra_phy_cfg_flags);
phy_cfg_cmd.calib_control.event_trigger =
mvm->fw->default_calib[ucode_type].event_trigger;
@@ -617,7 +623,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
mvm->rfkill_safe_init_done = false;
- if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
/* if needed, we'll reset this on our way out later */
mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
@@ -651,11 +657,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
- CNVI_PMU_STEP_FLOW) &
- CNVI_PMU_STEP_FLOW_FORCE_URM);
-
/* Send init config command to mark that we are sending NVM access
* commands
*/
@@ -756,7 +757,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
goto remove_notif;
}
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
ret = iwl_mvm_send_bt_init_conf(mvm);
if (ret)
goto remove_notif;
@@ -1270,7 +1271,7 @@ void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
}
}
- iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
+ iwl_acpi_get_phy_filters(&mvm->fwrt);
if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
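The RSS hunk in fw.c fills the indirection table by cycling over queues 1..num_rxqs-1, keeping queue 0 free as the fallback queue. A standalone sketch of the fill loop; the 16-entry table and num_rxqs of 4 are illustrative values, not the sizes from struct iwl_rss_config_cmd:

#include <stdio.h>

int main(void)
{
	unsigned int num_rxqs = 4;	/* stands in for trans->info.num_rxqs */
	unsigned int table[16];

	/* never direct RSS traffic to queue 0, the fallback queue */
	for (unsigned int i = 0; i < 16; i++)
		table[i] = 1 + (i % (num_rxqs - 1));

	for (unsigned int i = 0; i < 16; i++)
		printf("%u%c", table[i], i == 15 ? '\n' : ' ');
	return 0;
}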
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index 1ea7c44250d4..c3cc1ea3ccc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2025 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#include <linux/leds.h>
@@ -102,7 +102,7 @@ void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
* if we control through the register, we're doing it
* even when the firmware isn't up, so no need to sync
*/
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
return;
iwl_mvm_led_set(mvm, mvm->led.brightness > 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index bec18d197f31..9098a36530cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -938,12 +938,19 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
- u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
+ u16 flags, cck_flag;
- if (rate_idx <= IWL_FIRST_CCK_RATE)
- flags |= is_new_rate ? IWL_MAC_BEACON_CCK
- : IWL_MAC_BEACON_CCK_V1;
+ if (is_new_rate) {
+ flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
+ cck_flag = IWL_MAC_BEACON_CCK;
+ } else {
+ cck_flag = IWL_MAC_BEACON_CCK_V1;
+ flags = iwl_fw_rate_idx_to_plcp(rate_idx);
+ }
+
+ if (rate_idx <= IWL_LAST_CCK_RATE)
+ flags |= cck_flag;
return flags;
}
@@ -969,7 +976,7 @@ u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm,
static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon,
- struct iwl_tx_cmd *tx)
+ struct iwl_tx_cmd_v6 *tx)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_tx_info *info;
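Besides restructuring for the old/new rate API split, the beacon-flags hunk above widens the CCK check from rate_idx <= IWL_FIRST_CCK_RATE to rate_idx <= IWL_LAST_CCK_RATE. Assuming the usual iwlwifi ordering, where CCK occupies indices 0 through 3 with OFDM following, the old comparison flagged only index 0; a short demo of the difference:

#include <stdio.h>

/* assumed index layout: CCK rates at 0..3, OFDM from 4 up */
#define IWL_FIRST_CCK_RATE	0
#define IWL_LAST_CCK_RATE	3

int main(void)
{
	for (int idx = 0; idx <= 5; idx++)
		printf("rate_idx %d: old test says %s, fixed test says %s\n",
		       idx,
		       idx <= IWL_FIRST_CCK_RATE ? "CCK" : "not CCK",
		       idx <= IWL_LAST_CCK_RATE ? "CCK" : "not CCK");
	return 0;
}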
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1e916a0ce082..0f056a6641bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -146,7 +146,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
MCC_UPDATE_CMD, 0);
IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver);
- regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
+ regd = iwl_parse_nvm_mcc_info(mvm->trans,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
@@ -311,8 +311,8 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
/* This has been tested on those devices only */
- if (mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
- mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000)
+ if (mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
+ mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_22000)
return -EOPNOTSUPP;
if (!mvm->nvm_data)
@@ -391,7 +391,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
* for older devices. We also don't see this issue on any newer
* devices.
*/
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000)
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
@@ -402,7 +402,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* We want to use the mac80211's reorder buffer for 9000 */
if (iwl_mvm_has_new_rx_api(mvm) &&
- mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_9000)
+ mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_9000)
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -417,10 +417,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return -EINVAL;
}
- if (mvm->trans->num_rx_queues > 1)
+ if (mvm->trans->info.num_rxqs > 1)
ieee80211_hw_set(hw, USES_RSS);
- if (mvm->trans->max_skb_frags)
+ if (mvm->trans->info.max_skb_frags)
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
hw->queues = IEEE80211_NUM_ACS;
@@ -441,7 +441,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- hw->max_tx_fragments = mvm->trans->max_skb_frags;
+ hw->max_tx_fragments = mvm->trans->info.max_skb_frags;
BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
@@ -544,7 +544,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_DFS_CONCURRENT);
@@ -610,7 +610,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->bands[NL80211_BAND_6GHZ] =
&mvm->nvm_data->bands[NL80211_BAND_6GHZ];
- hw->wiphy->hw_version = mvm->trans->hw_id;
+ hw->wiphy->hw_version = mvm->trans->info.hw_id;
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -731,8 +731,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
- ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
-
#ifdef CONFIG_PM_SLEEP
if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
device_can_wakeup(mvm->trans->dev)) {
@@ -771,7 +769,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
- hw->netdev_features |= mvm->cfg->features;
+ hw->netdev_features |= mvm->trans->mac_cfg->base->features;
if (!iwl_mvm_is_csum_supported(mvm))
hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK;
@@ -1379,7 +1377,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
iwl_mvm_rm_aux_sta(mvm);
if (suspend &&
- mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+ mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
iwl_mvm_fast_suspend(mvm);
/* From this point on, we won't touch the device */
iwl_mvm_mei_device_state(mvm, false);
@@ -1988,15 +1986,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
* interface is handled as part of the stop_ap flow.
*/
if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC) {
-#ifdef CONFIG_NL80211_TESTMODE
- if (vif == mvm->noa_vif) {
- mvm->noa_vif = NULL;
- mvm->noa_duration = 0;
- }
-#endif
+ vif->type == NL80211_IFTYPE_ADHOC)
goto out;
- }
iwl_mvm_power_update_mac(mvm);
@@ -3059,7 +3050,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
* context. For the newer, the beacon is a resource that belongs to a
* MAC, so need to send beacon template after adding the mac.
*/
- if (mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_22000) {
+ if (mvm->trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_22000) {
/* Add the mac context */
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
@@ -4105,7 +4096,8 @@ void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (!iwl_mvm_has_rlc_offload(mvm))
+ if (!iwl_mvm_has_rlc_offload(mvm) ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, MAC_PM_POWER_TABLE, 0) >= 2)
return;
mvmvif->ps_disabled = !vif->cfg.ps;
@@ -4395,7 +4387,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- if (!mvm->trans->trans_cfg->gen2) {
+ if (!mvm->trans->mac_cfg->gen2) {
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
} else if (vif->type == NL80211_IFTYPE_STATION) {
@@ -4512,7 +4504,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
ptk_pn = kzalloc(struct_size(ptk_pn, q,
- mvm->trans->num_rx_queues),
+ mvm->trans->info.num_rxqs),
GFP_KERNEL);
if (!ptk_pn) {
ret = -ENOMEM;
@@ -4521,7 +4513,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
ieee80211_get_key_rx_seq(key, tid, &seq);
- for (q = 0; q < mvm->trans->num_rx_queues; q++)
+ for (q = 0; q < mvm->trans->info.num_rxqs; q++)
memcpy(ptk_pn->q[q].pn[tid],
seq.ccmp.pn,
IEEE80211_CCMP_PN_LEN);
@@ -5525,70 +5517,6 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
&mvm_sta->vif->bss_conf);
}
-#ifdef CONFIG_NL80211_TESTMODE
-static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
- [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
- [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
- [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
-};
-
-static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- void *data, int len)
-{
- struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
- int err;
- u32 noa_duration;
-
- err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len,
- iwl_mvm_tm_policy, NULL);
- if (err)
- return err;
-
- if (!tb[IWL_MVM_TM_ATTR_CMD])
- return -EINVAL;
-
- switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
- case IWL_MVM_TM_CMD_SET_NOA:
- if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
- !vif->bss_conf.enable_beacon ||
- !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
- return -EINVAL;
-
- noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
- if (noa_duration >= vif->bss_conf.beacon_int)
- return -EINVAL;
-
- mvm->noa_duration = noa_duration;
- mvm->noa_vif = vif;
-
- return iwl_mvm_update_quotas(mvm, true, NULL);
- case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
- /* must be associated client vif - ignore authorized */
- if (!vif || vif->type != NL80211_IFTYPE_STATION ||
- !vif->cfg.assoc || !vif->bss_conf.dtim_period ||
- !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
- return -EINVAL;
-
- if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif);
- return iwl_mvm_disable_beacon_filter(mvm, vif);
- }
-
- return -EOPNOTSUPP;
-}
-
-int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- void *data, int len)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-
- guard(mvm)(mvm);
- return __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
-}
-#endif
-
void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
{
@@ -6140,12 +6068,12 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
break;
}
- if (format == RATE_MCS_CCK_MSK ||
- format == RATE_MCS_LEGACY_OFDM_MSK) {
+ if (format == RATE_MCS_MOD_TYPE_CCK ||
+ format == RATE_MCS_MOD_TYPE_LEGACY_OFDM) {
int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
/* add the offset needed to get to the legacy ofdm indices */
- if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
rate += IWL_FIRST_OFDM_RATE;
switch (rate) {
@@ -6190,7 +6118,7 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
rinfo->nss = u32_get_bits(rate_n_flags,
RATE_MCS_NSS_MSK) + 1;
- rinfo->mcs = format == RATE_MCS_HT_MSK ?
+ rinfo->mcs = format == RATE_MCS_MOD_TYPE_HT ?
RATE_HT_MCS_INDEX(rate_n_flags) :
u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
@@ -6198,11 +6126,11 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
switch (format) {
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
/* TODO: GI/LTF/RU. How does the firmware encode them? */
rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -6243,10 +6171,10 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
rinfo->he_dcm = 1;
break;
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
rinfo->flags |= RATE_INFO_FLAGS_MCS;
break;
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
break;
}
@@ -6426,17 +6354,10 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
bool sync,
const void *data, u32 size)
{
- struct {
- struct iwl_rxq_sync_cmd cmd;
- struct iwl_mvm_internal_rxq_notif notif;
- } __packed cmd = {
- .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1),
- .cmd.count =
- cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) +
- size),
- .notif.type = type,
- .notif.sync = sync,
- };
+ DEFINE_RAW_FLEX(struct iwl_rxq_sync_cmd, cmd, payload,
+ sizeof(struct iwl_mvm_internal_rxq_notif));
+ struct iwl_mvm_internal_rxq_notif *notif =
+ (struct iwl_mvm_internal_rxq_notif *)cmd->payload;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
- .data[0] = &cmd,
+ .data[0] = cmd,
@@ -6447,16 +6368,22 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
};
int ret;
+ cmd->rxq_mask = cpu_to_le32(BIT(mvm->trans->info.num_rxqs) - 1);
+ cmd->count = cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) +
+ size);
+ notif->type = type;
+ notif->sync = sync;
+
/* size must be a multiple of DWORD */
- if (WARN_ON(cmd.cmd.count & cpu_to_le32(3)))
+ if (WARN_ON(cmd->count & cpu_to_le32(3)))
return;
if (!iwl_mvm_has_new_rx_api(mvm))
return;
if (sync) {
- cmd.notif.cookie = mvm->queue_sync_cookie;
- mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1;
+ notif->cookie = mvm->queue_sync_cookie;
+ mvm->queue_sync_state = (1 << mvm->trans->info.num_rxqs) - 1;
}
ret = iwl_mvm_send_cmd(mvm, &hcmd);
@@ -6649,8 +6576,6 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.sync_rx_queues = iwl_mvm_sync_rx_queues,
- CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
-
#ifdef CONFIG_PM_SLEEP
/* look at d3.c */
.suspend = iwl_mvm_suspend,
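The sync_rx_queues hunk above replaces a hand-rolled wrapper struct with DEFINE_RAW_FLEX(), which declares storage sized for a struct ending in a flexible array member plus a typed pointer to it. A heap-based userspace stand-in for the same layout; the struct names and fields are simplified stand-ins for iwl_rxq_sync_cmd and iwl_mvm_internal_rxq_notif:

#include <stdio.h>
#include <stdlib.h>

struct sync_cmd {
	unsigned int rxq_mask;
	unsigned int count;
	unsigned char payload[];	/* flexible array member */
};

struct inner_notif {
	unsigned char type;
	unsigned char sync;
	unsigned int cookie;
};

int main(void)
{
	/* header plus exactly one notif's worth of payload */
	struct sync_cmd *cmd = calloc(1, sizeof(*cmd) +
					 sizeof(struct inner_notif));
	struct inner_notif *notif;

	if (!cmd)
		return 1;

	notif = (struct inner_notif *)cmd->payload;
	cmd->rxq_mask = (1u << 4) - 1;		/* 4 RX queues -> 0xf */
	cmd->count = sizeof(*notif);
	notif->type = 1;
	notif->sync = 1;

	printf("mask 0x%x, count %u bytes\n", cmd->rxq_mask, cmd->count);
	free(cmd);
	return 0;
}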
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index bb7851042177..81ca9ff67be9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -1,17 +1,25 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2024 Intel Corporation
+ * Copyright (C) 2022 - 2025 Intel Corporation
*/
#include "mvm.h"
static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_config_cmd *cmd)
+ struct iwl_mac_config_cmd *cmd,
+ int cmd_ver)
{
- if (vif->type == NL80211_IFTYPE_AP)
- cmd->he_ap_support = cpu_to_le16(1);
- else
- cmd->he_support = cpu_to_le16(1);
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_ap_support = 1;
+ } else {
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
+ else
+ cmd->wifi_gen.he_support = 1;
+ }
}
static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
@@ -22,6 +30,12 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_bss_conf *link_conf;
unsigned int link_id;
+ int cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP,
+ MAC_CONFIG_CMD), 0);
+
+ if (WARN_ON(cmd_ver < 1 || cmd_ver > 3))
+ return;
cmd->id_and_color = cpu_to_le32(mvmvif->id);
cmd->action = cpu_to_le32(action);
@@ -30,8 +44,8 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN);
- cmd->he_support = 0;
- cmd->eht_support = 0;
+ cmd->wifi_gen_v2.he_support = 0;
+ cmd->wifi_gen_v2.eht_support = 0;
/* should be set by specific context type handler */
cmd->filter_flags = 0;
@@ -51,8 +65,11 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
* and enable both when we have MLO.
*/
if (ieee80211_vif_is_mld(vif)) {
- iwl_mvm_mld_set_he_support(mvm, vif, cmd);
- cmd->eht_support = cpu_to_le32(1);
+ iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver);
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
+ else
+ cmd->wifi_gen.eht_support = 1;
return;
}
@@ -63,16 +80,19 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
continue;
if (link_conf->he_support)
- iwl_mvm_mld_set_he_support(mvm, vif, cmd);
+ iwl_mvm_mld_set_he_support(mvm, vif, cmd, cmd_ver);
- /* it's not reasonable to have EHT without HE and FW API doesn't
+ /* It's not reasonable to have EHT without HE and FW API doesn't
* support it. Ignore EHT in this case.
*/
if (!link_conf->he_support && link_conf->eht_support)
continue;
if (link_conf->eht_support) {
- cmd->eht_support = cpu_to_le32(1);
+ if (cmd_ver == 2)
+ cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
+ else
+ cmd->wifi_gen.eht_support = 1;
break;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 78d7153a0cfc..bf24f8cb673e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -150,19 +150,6 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_vif_dbgfs_rm_link(mvm, vif);
- /* For AP/GO interface, the tear down of the resources allocated to the
- * interface is be handled as part of the stop_ap flow.
- */
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC) {
-#ifdef CONFIG_NL80211_TESTMODE
- if (vif == mvm->noa_vif) {
- mvm->noa_vif = NULL;
- mvm->noa_duration = 0;
- }
-#endif
- }
-
iwl_mvm_power_update_mac(mvm);
/* Before the interface removal, mac80211 would cancel the ROC, and the
@@ -1403,8 +1390,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.sync_rx_queues = iwl_mvm_sync_rx_queues,
- CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
-
#ifdef CONFIG_PM_SLEEP
/* look at d3.c */
.suspend = iwl_mvm_suspend,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 9dd670041137..e1010521c3ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2024 Intel Corporation
+ * Copyright (C) 2022-2025 Intel Corporation
*/
#include "mvm.h"
#include "time-sync.h"
@@ -48,9 +48,13 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm,
struct iwl_sta_cfg_cmd *cmd)
{
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
+ int cmd_len = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) > 1 ?
+ sizeof(*cmd) :
+ sizeof(struct iwl_sta_cfg_cmd_v1);
int ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
- 0, sizeof(*cmd), cmd);
+ 0, cmd_len, cmd);
if (ret)
IWL_ERR(mvm, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
return ret;
@@ -144,7 +148,7 @@ int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
{
int ret, txq;
unsigned int wdg_timeout = _wdg_timeout ? *_wdg_timeout :
- mvm->trans->trans_cfg->base_params->wd_timeout;
+ mvm->trans->mac_cfg->base->wd_timeout;
if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA))
return -ENOSPC;
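STA_CONFIG_CMD gained a version 2 layout, so the sender above now derives the payload length from the firmware's advertised command version: older firmware is sent only the v1-sized prefix, which is safe because v2 strictly extends v1. A minimal sketch of that length selection; the field names are invented for illustration:

#include <stddef.h>
#include <stdio.h>

/* v2 extends v1, so a v1-sized prefix of the v2 struct is still valid */
struct sta_cfg_v1 { unsigned int sta_id; unsigned int link_id; };
struct sta_cfg { unsigned int sta_id; unsigned int link_id;
		 unsigned int mld_addr_valid; };

static size_t sta_cfg_len(int fw_cmd_ver)
{
	return fw_cmd_ver > 1 ? sizeof(struct sta_cfg)
			      : sizeof(struct sta_cfg_v1);
}

int main(void)
{
	printf("cmd ver 1 -> %zu bytes, cmd ver 2 -> %zu bytes\n",
	       sta_cfg_len(1), sta_cfg_len(2));
	return 0;
}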
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index f6391c7a3e29..a4f412e750d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -126,7 +126,7 @@ struct iwl_mvm_time_event_data {
/* Power management */
/**
- * enum iwl_power_scheme
+ * enum iwl_power_scheme - iwl power schemes
* @IWL_POWER_SCHEME_CAM: Continuously Active Mode
* @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
* @IWL_POWER_SCHEME_LP: Low Power
@@ -664,6 +664,8 @@ enum iwl_mvm_sched_scan_pass_all_states {
* @min_backoff: The minimal tx backoff due to power restrictions
* @params: Parameters to configure the thermal throttling algorithm.
* @throttle: Is thermal throttling is active?
+ * @power_budget_mw: maximum cTDP power budget as defined for this system and
+ * device
*/
struct iwl_mvm_tt_mgmt {
struct delayed_work ct_kill_exit;
@@ -672,6 +674,8 @@ struct iwl_mvm_tt_mgmt {
u32 min_backoff;
struct iwl_tt_params params;
bool throttle;
+
+ u32 power_budget_mw;
};
#ifdef CONFIG_THERMAL
@@ -999,7 +1003,7 @@ struct iwl_mvm {
struct iwl_trans *trans;
const struct iwl_fw *fw;
- const struct iwl_cfg *cfg;
+ const struct iwl_rf_cfg *cfg;
struct iwl_phy_db *phy_db;
struct ieee80211_hw *hw;
@@ -1033,6 +1037,8 @@ struct iwl_mvm {
u8 cca_40mhz_workaround;
+ u8 fw_rates_ver;
+
u32 ampdu_ref;
bool ampdu_toggle;
@@ -1246,11 +1252,6 @@ struct iwl_mvm {
struct iwl_time_quota_cmd last_quota_cmd;
-#ifdef CONFIG_NL80211_TESTMODE
- u32 noa_duration;
- struct ieee80211_vif *noa_vif;
-#endif
-
/* Tx queues */
u16 aux_queue;
u16 snif_queue;
@@ -1348,10 +1349,6 @@ struct iwl_mvm {
__le16 cur_aid;
u8 cur_bssid[ETH_ALEN];
-#ifdef CONFIG_ACPI
- struct iwl_phy_specific_cfg phy_filters;
-#endif
-
/* report rx timestamp in ptp clock time */
bool rx_ts_ptp;
@@ -1562,7 +1559,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
*/
- if (mvm->cfg->nvm_type == IWL_NVM_EXT)
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT)
return nvm_lar && tlv_lar;
else
return tlv_lar;
@@ -1626,13 +1623,13 @@ static inline bool iwl_mvm_has_new_station_api(const struct iwl_fw *fw)
static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
{
/* TODO - replace with TLV once defined */
- return mvm->trans->trans_cfg->gen2;
+ return mvm->trans->mac_cfg->gen2;
}
static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
{
/* TODO - better define this */
- return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+ return mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
}
static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
@@ -1657,7 +1654,7 @@ static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm)
* but then there's a little bit of code in scan that won't make
* any sense...
*/
- return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+ return mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000;
}
static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
@@ -1732,13 +1729,13 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans)
{
- if (CSR_HW_RFID_IS_CDB(trans->hw_rf_id))
+ if (CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id))
return false;
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
case IWL_CFG_RF_TYPE_FM:
/* Step A doesn't support eSR */
- return CSR_HW_RFID_STEP(trans->hw_rf_id);
+ return CSR_HW_RFID_STEP(trans->info.hw_rf_id);
case IWL_CFG_RF_TYPE_WH:
case IWL_CFG_RF_TYPE_PE:
return true;
@@ -1757,8 +1754,8 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
/* Check if HW supports eSR or STR */
if (iwl_mvm_is_esr_supported(trans) ||
- (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
+ (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->info.hw_rf_id)))
return IWL_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1771,7 +1768,7 @@ extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
return iwl_mvm_ac_to_bz_tx_fifo[ac];
if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_ac_to_gen2_tx_fifo[ac];
@@ -1810,9 +1807,6 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
enum nl80211_band band,
struct ieee80211_tx_rate *r);
-void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
- enum nl80211_band band,
- struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx);
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1843,9 +1837,9 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
+ struct iwl_tx_cmd_v6 *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
@@ -1876,7 +1870,7 @@ int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm,
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd)
+ struct iwl_tx_cmd_v6 *tx_cmd)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -2138,6 +2132,10 @@ bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
const struct iwl_mvm_link_sel_data *b);
s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
+
+
+extern const struct iwl_hcmd_arr iwl_mvm_groups[];
+extern const unsigned int iwl_mvm_groups_size;
#endif
/* AP and IBSS */
@@ -2459,7 +2457,7 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
*/
static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
{
- return ((BIT(mvm->trans->trans_cfg->base_params->num_of_queues) - 1) &
+ return ((BIT(mvm->trans->mac_cfg->base->num_of_queues) - 1) &
~BIT(IWL_MVM_DQA_CMD_QUEUE));
}
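
For reference, the mask computation above builds an all-queues bitmap and then
clears the command queue, which must never be flushed. A self-contained sketch
with assumed values (16 queues, command queue 0; the real count comes from
mac_cfg->base->num_of_queues):

	#define BIT(n) (1U << (n))

	u32 num_of_queues = 16;	/* assumed for illustration */
	u32 cmd_queue = 0;	/* IWL_MVM_DQA_CMD_QUEUE */
	u32 mask = (BIT(num_of_queues) - 1) & ~BIT(cmd_queue);
	/* mask == 0x0000fffe: queues 1..15 are flushable */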
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 80ec59c58ae4..953218f1e025 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -120,7 +120,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
} else {
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
- ret, mvm->trans->name);
+ ret, mvm->trans->info.name);
ret = -ENODATA;
}
goto exit;
@@ -191,7 +191,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
while (ret == length) {
/* Check no memory assumptions fail and cause an overflow */
if ((size_read + offset + length) >
- mvm->trans->trans_cfg->base_params->eeprom_size) {
+ mvm->trans->mac_cfg->base->eeprom_size) {
IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
return -ENOBUFS;
}
@@ -206,7 +206,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
offset += ret;
}
- iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);
+ iwl_nvm_fixups(mvm->trans->info.hw_id, section, data, offset);
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM section %d read completed\n", section);
@@ -226,7 +226,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
/* Checking for required sections */
if (mvm->trans->cfg->nvm_type == IWL_NVM) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
+ !mvm->nvm_sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
return NULL;
}
@@ -244,7 +244,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
return NULL;
}
/* MAC_OVERRIDE or at least HW section must exist */
- if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
+ if (!mvm->nvm_sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data &&
!mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
IWL_ERR(mvm,
"Can't parse mac_address, empty sections\n");
@@ -260,7 +260,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
}
}
- hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
+ hw = (const __be16 *)sections[mvm->trans->mac_cfg->base->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
mac_override =
@@ -308,16 +308,15 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
int ret, section;
u32 size_read = 0;
u8 *nvm_buffer, *temp;
- const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;
- if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+ if (WARN_ON_ONCE(mvm->trans->mac_cfg->base->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
return -EINVAL;
/* load NVM values from nic */
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
- nvm_buffer = kmalloc(mvm->trans->trans_cfg->base_params->eeprom_size,
+ nvm_buffer = kmalloc(mvm->trans->mac_cfg->base->eeprom_size,
GFP_KERNEL);
if (!nvm_buffer)
return -ENOMEM;
@@ -338,7 +337,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
break;
}
- iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);
+ iwl_nvm_fixups(mvm->trans->info.hw_id, section, temp, ret);
mvm->nvm_sections[section].data = temp;
mvm->nvm_sections[section].length = ret;
@@ -367,7 +366,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_reg_blob.size = ret;
break;
default:
- if (section == mvm->cfg->nvm_hw_section_num) {
+ if (section == mvm->trans->mac_cfg->base->nvm_hw_section_num) {
mvm->nvm_hw_blob.data = temp;
mvm->nvm_hw_blob.size = ret;
break;
@@ -384,21 +383,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* read External NVM file from the mod param */
ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
mvm->nvm_sections);
- if (ret) {
- mvm->nvm_file_name = nvm_file_C;
-
- if ((ret == -EFAULT || ret == -ENOENT) &&
- mvm->nvm_file_name) {
- /* in case nvm file was failed try again */
- ret = iwl_read_external_nvm(mvm->trans,
- mvm->nvm_file_name,
- mvm->nvm_sections);
- if (ret)
- return ret;
- } else {
- return ret;
- }
- }
+ if (ret)
+ return ret;
}
/* parse the relevant nvm sections */
@@ -554,7 +540,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
struct ieee80211_regdomain *regd;
char mcc[3];
- if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT) {
tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
nvm_lar = mvm->nvm_data->lar_enabled;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 76603ef02704..a2dc5c3b0596 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -92,11 +92,11 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
radio_cfg_step, radio_cfg_dash);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return;
/* SKU control */
- reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);
+ reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->info.hw_rev);
/* radio configuration */
reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
@@ -114,7 +114,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* unrelated errors. Need to further investigate this, but for now
* we'll separate cases.
*/
- if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
@@ -135,7 +135,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- if (!mvm->trans->cfg->apmg_not_supported)
+ if (!mvm->trans->mac_cfg->base->apmg_not_supported)
iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
@@ -777,7 +777,8 @@ static const struct iwl_hcmd_names iwl_mvm_bt_coex_names[] = {
HCMD_NAME(PROFILE_NOTIF),
};
-static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
@@ -793,6 +794,11 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names),
[STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names),
};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_groups);
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_mvm_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_groups_size);
+#endif
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
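
The VISIBLE_IF_IWLWIFI_KUNIT / EXPORT_SYMBOL_IF_IWLWIFI_KUNIT pair above
compiles away when the tests are disabled; a sketch of the assumed expansion,
matching the MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING") line in the new
test file later in this patch:

	#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
	#define VISIBLE_IF_IWLWIFI_KUNIT	/* symbol stays non-static */
	#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym) \
		EXPORT_SYMBOL_NS_GPL(sym, "EXPORTED_FOR_KUNIT_TESTING")
	#else
	#define VISIBLE_IF_IWLWIFI_KUNIT static
	#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym)
	#endif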
@@ -1272,13 +1278,12 @@ static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
}
static struct iwl_op_mode *
-iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
struct ieee80211_hw *hw;
struct iwl_op_mode *op_mode;
struct iwl_mvm *mvm;
- struct iwl_trans_config trans_cfg = {};
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
@@ -1286,6 +1291,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
+ int ratecheck;
int err;
/*
@@ -1306,18 +1312,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (!hw)
return ERR_PTR(-ENOMEM);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
max_agg = 512;
else
max_agg = IEEE80211_MAX_AMPDU_BUF_HE;
hw->max_rx_aggregation_subframes = max_agg;
- if (cfg->max_tx_agg_size)
- hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
- else
- hw->max_tx_aggregation_subframes = max_agg;
-
op_mode = hw->priv;
mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -1337,19 +1338,58 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->init_status = 0;
+ /* start with v1 rates */
+ mvm->fw_rates_ver = 1;
+
+ /* check for rates version 2 */
+ ratecheck =
+ (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) >= 8) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) >= 3) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) >= 6);
+ if (ratecheck != 0 && ratecheck != 4) {
+ IWL_ERR(mvm, "Firmware has inconsistent rates\n");
+ err = -EINVAL;
+ goto out_free;
+ }
+ if (ratecheck == 4)
+ mvm->fw_rates_ver = 2;
+
+ /* check for rates version 3 */
+ ratecheck =
+ (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) >= 11) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) >= 6) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) >= 4) +
+ (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) >= 9);
+ if (ratecheck != 0 && ratecheck != 5) {
+ IWL_ERR(mvm, "Firmware has inconsistent rates\n");
+ err = -EINVAL;
+ goto out_free;
+ }
+ if (ratecheck == 5)
+ mvm->fw_rates_ver = 3;
+
+ trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
- trans->rx_mpdu_cmd_hdr_size =
- (trans->trans_cfg->device_family >=
+ trans->conf.rx_mpdu_cmd_hdr_size =
+ (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_rx_mpdu_desc) :
IWL_RX_DESC_SIZE_V1;
} else {
op_mode->ops = &iwl_mvm_ops;
- trans->rx_mpdu_cmd_hdr_size =
+ trans->conf.rx_mpdu_cmd_hdr_size =
sizeof(struct iwl_rx_mpdu_res_start);
- if (WARN_ON(trans->num_rx_queues > 1)) {
+ if (WARN_ON(trans->info.num_rxqs > 1)) {
err = -EINVAL;
goto out_free;
}
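
The two ratecheck blocks above use a counting idiom: each lookup contributes
0 or 1, so a firmware whose APIs all belong to one generation sums to either 0
or the full number of checks, and any value in between means a mixed (and
therefore rejected) API set. A standalone sketch of the idiom, with
hypothetical version thresholds:

	static int demo_check_api_generation(int cmd_ver, int notif_ver,
					     int rx_ver)
	{
		int hits = (cmd_ver >= 8) + (notif_ver >= 3) + (rx_ver >= 4);

		if (hits != 0 && hits != 3)
			return -EINVAL;	/* mixed generations: reject */

		return hits == 3;	/* 1: new API set, 0: old API set */
	}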
@@ -1437,53 +1477,50 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
* Populate the state variables that the transport layer needs
* to know about.
*/
- trans_cfg.op_mode = op_mode;
- trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
- trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+ BUILD_BUG_ON(sizeof(no_reclaim_cmds) >
+ sizeof(trans->conf.no_reclaim_cmds));
+ memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
+ sizeof(no_reclaim_cmds));
+ trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size();
+ trans->conf.rx_buf_size = iwl_amsdu_size_to_rxb_size();
- trans->wide_cmd_header = true;
- trans_cfg.bc_table_dword =
- mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
+ trans->conf.wide_cmd_header = true;
- trans_cfg.command_groups = iwl_mvm_groups;
- trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+ trans->conf.command_groups = iwl_mvm_groups;
+ trans->conf.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
- trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
- trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
- trans_cfg.scd_set_active = true;
+ trans->conf.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+ trans->conf.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
+ trans->conf.scd_set_active = true;
- trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
- driver_data[2]);
+ trans->conf.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
snprintf(mvm->hw->wiphy->fw_version,
sizeof(mvm->hw->wiphy->fw_version),
"%.31s", fw->fw_version);
- trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
+ trans->conf.fw_reset_handshake =
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
- trans_cfg.queue_alloc_cmd_ver =
+ trans->conf.queue_alloc_cmd_ver =
iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
SCD_QUEUE_CONFIG_CMD),
0);
mvm->sta_remove_requires_queue_remove =
- trans_cfg.queue_alloc_cmd_ver > 0;
+ trans->conf.queue_alloc_cmd_ver > 0;
mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw);
/* Configure transport layer */
- iwl_trans_configure(mvm->trans, &trans_cfg);
+ iwl_trans_op_mode_enter(mvm->trans, op_mode);
- trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
- trans->iml = mvm->fw->iml;
- trans->iml_len = mvm->fw->iml_len;
-
/* set up notification wait support */
iwl_notification_wait_init(&mvm->notif_wait);
@@ -2100,7 +2137,7 @@ static bool iwl_mvm_sw_reset(struct iwl_op_mode *op_mode,
mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
return true;
- } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
+ } else if (mvm->trans->mac_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
return true;
}
@@ -2125,7 +2162,6 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
mutex_lock(&mvm->mutex);
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_mvm_stop_device(mvm);
mvm->fast_resume = false;
mutex_unlock(&mvm->mutex);
@@ -2165,7 +2201,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- if (unlikely(queue >= mvm->trans->num_rx_queues))
+ if (unlikely(queue >= mvm->trans->info.num_rxqs))
return;
if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index a386b315e52f..0057fddf88f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -376,6 +376,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
if (!vif->cfg.ps || !mvmvif->pm_enabled)
return;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, MAC_PM_POWER_TABLE, 0) >= 2)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ENABLE_SMPS_MSK);
+
if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
(!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index aad2614af9ad..798a7e4bea83 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018, 2021-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2021-2022, 2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -86,45 +86,6 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
}
}
-static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
- struct iwl_time_quota_cmd *cmd)
-{
-#ifdef CONFIG_NL80211_TESTMODE
- struct iwl_mvm_vif *mvmvif;
- int i, phy_id = -1, beacon_int = 0;
-
- if (!mvm->noa_duration || !mvm->noa_vif)
- return;
-
- mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
- if (!mvmvif->ap_ibss_active)
- return;
-
- phy_id = mvmvif->deflink.phy_ctxt->id;
- beacon_int = mvm->noa_vif->bss_conf.beacon_int;
-
- for (i = 0; i < MAX_BINDINGS; i++) {
- struct iwl_time_quota_data *data =
- iwl_mvm_quota_cmd_get_quota(mvm, cmd,
- i);
- u32 id_n_c = le32_to_cpu(data->id_and_color);
- u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
- u32 quota = le32_to_cpu(data->quota);
-
- if (id != phy_id)
- continue;
-
- quota *= (beacon_int - mvm->noa_duration);
- quota /= beacon_int;
-
- IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n",
- le32_to_cpu(data->quota), quota);
-
- data->quota = cpu_to_le32(quota);
- }
-#endif
-}
-
int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
bool force_update,
struct ieee80211_vif *disabled_vif)
@@ -260,8 +221,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
}
}
- iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
-
/* check that we have non-zero quota for all valid bindings */
for (i = 0; i < MAX_BINDINGS; i++) {
qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index de5ac000272e..89ac4c6b3e54 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -72,7 +72,7 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
u16 flags = 0;
/* get STBC flags */
- if (mvm->cfg->ht_params->stbc &&
+ if (mvm->cfg->ht_params.stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
@@ -83,7 +83,7 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
}
- if (mvm->cfg->ht_params->ldpc &&
+ if (mvm->cfg->ht_params.ldpc &&
((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
@@ -454,22 +454,11 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
char pretty_rate[100];
- if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
- TLC_MNG_UPDATE_NOTIF, 0) < 3) {
- rs_pretty_print_rate_v1(pretty_rate,
- sizeof(pretty_rate),
- le32_to_cpu(notif->rate));
- IWL_DEBUG_RATE(mvm,
- "Got rate in old format. Rate: %s. Converting.\n",
- pretty_rate);
- lq_sta->last_rate_n_flags =
- iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
- } else {
- lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
- }
+ lq_sta->last_rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(notif->rate, mvm->fw_rates_ver);
rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
lq_sta->last_rate_n_flags);
- IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
+ IWL_DEBUG_RATE(mvm, "rate: %s\n", pretty_rate);
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 068c58e9c1eb..5802ed80a9ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,6 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014, 2018 - 2023 Intel Corporation. All rights reserved.
+ * Copyright(c) 2025 Intel Corporation
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*****************************************************************************/
@@ -895,7 +896,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
WARN_ON_ONCE(1);
}
} else if (ucode_rate & RATE_MCS_VHT_MSK_V1) {
- nss = FIELD_GET(RATE_MCS_NSS_MSK, ucode_rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, ucode_rate) + 1;
if (nss == 1) {
rate->type = LQ_VHT_SISO;
@@ -909,7 +910,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
WARN_ON_ONCE(1);
}
} else if (ucode_rate & RATE_MCS_HE_MSK_V1) {
- nss = FIELD_GET(RATE_MCS_NSS_MSK, ucode_rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, ucode_rate) + 1;
if (nss == 1) {
rate->type = LQ_HE_SISO;
@@ -2696,8 +2697,10 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
lq_sta = mvm_sta;
spin_lock_bh(&lq_sta->pers.lock);
- iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
- info->band, &info->control.rates[0]);
+ iwl_mvm_hwrate_to_tx_rate(iwl_mvm_v3_rate_from_fw(
+ cpu_to_le32(lq_sta->last_rate_n_flags),
+ 1),
+ info->band, &info->control.rates[0]);
info->control.rates[0].count = 1;
/* Report the optimal rate based on rssi and STA caps if we haven't
@@ -2707,8 +2710,12 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
last_ucode_rate = ucode_rate_from_rs_rate(mvm,
optimal_rate);
- iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
- &txrc->reported_rate);
+ last_ucode_rate =
+ iwl_mvm_v3_rate_from_fw(cpu_to_le32(last_ucode_rate),
+ 1);
+ iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+ &txrc->reported_rate);
+ txrc->reported_rate.count = 1;
}
spin_unlock_bh(&lq_sta->pers.lock);
}
@@ -2813,11 +2820,11 @@ static void rs_ht_init(struct iwl_mvm *mvm,
lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
- if (mvm->cfg->ht_params->ldpc &&
+ if (mvm->cfg->ht_params.ldpc &&
(ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
lq_sta->ldpc = true;
- if (mvm->cfg->ht_params->stbc &&
+ if (mvm->cfg->ht_params.stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
lq_sta->stbc_capable = true;
@@ -2832,11 +2839,11 @@ static void rs_vht_init(struct iwl_mvm *mvm,
{
rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
- if (mvm->cfg->ht_params->ldpc &&
+ if (mvm->cfg->ht_params.ldpc &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
lq_sta->ldpc = true;
- if (mvm->cfg->ht_params->stbc &&
+ if (mvm->cfg->ht_params.stbc &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
lq_sta->stbc_capable = true;
@@ -2887,10 +2894,10 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
if (rate & RATE_MCS_HT_MSK_V1) {
mvm->drv_rx_stats.ht_frames++;
- nss = ((rate & RATE_HT_MCS_NSS_MSK_V1) >> RATE_HT_MCS_NSS_POS_V1) + 1;
+ nss = FIELD_GET(RATE_HT_MCS_MIMO2_MSK, rate) + 1;
} else if (rate & RATE_MCS_VHT_MSK_V1) {
mvm->drv_rx_stats.vht_frames++;
- nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1;
} else {
mvm->drv_rx_stats.legacy_frames++;
}
@@ -3304,7 +3311,7 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
if (num_of_ant(ant) == 1)
lq_cmd->single_stream_ant_msk = ant;
- if (!mvm->trans->trans_cfg->gen2)
+ if (!mvm->trans->mac_cfg->gen2)
lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
else
lq_cmd->agg_frame_cnt_limit =
@@ -3673,16 +3680,15 @@ int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate)
if (rate & RATE_MCS_VHT_MSK_V1) {
type = "VHT";
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
- nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1;
} else if (rate & RATE_MCS_HT_MSK_V1) {
type = "HT";
mcs = rate & RATE_HT_MCS_INDEX_MSK_V1;
- nss = ((rate & RATE_HT_MCS_NSS_MSK_V1)
- >> RATE_HT_MCS_NSS_POS_V1) + 1;
+ nss = FIELD_GET(RATE_HT_MCS_MIMO2_MSK, rate) + 1;
} else if (rate & RATE_MCS_HE_MSK_V1) {
type = "HE";
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
- nss = FIELD_GET(RATE_MCS_NSS_MSK, rate) + 1;
+ nss = FIELD_GET(RATE_VHT_MCS_NSS_MSK, rate) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}
@@ -4172,3 +4178,167 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
else
return rs_drv_tx_protection(mvm, mvmsta, enable);
}
+
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
+u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver)
+{
+ u32 rate_v3 = 0, rate_v1;
+ u32 dup = 0;
+
+ if (rate_ver > 1)
+ return iwl_v3_rate_from_v2_v3(rate, rate_ver >= 3);
+
+ rate_v1 = le32_to_cpu(rate);
+ if (rate_v1 == 0)
+ return rate_v1;
+ /* convert rate */
+ if (rate_v1 & RATE_MCS_HT_MSK_V1) {
+ u32 nss;
+
+ rate_v3 |= RATE_MCS_MOD_TYPE_HT;
+ rate_v3 |=
+ rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
+ nss = u32_get_bits(rate_v1, RATE_HT_MCS_MIMO2_MSK);
+ rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+ } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
+ rate_v1 & RATE_MCS_HE_MSK_V1) {
+ u32 nss = u32_get_bits(rate_v1, RATE_VHT_MCS_NSS_MSK);
+
+ rate_v3 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
+
+ rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
+ if (rate_v1 & RATE_MCS_HE_MSK_V1) {
+ u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
+ u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
+ RATE_MCS_HE_106T_POS_V1;
+ u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
+ RATE_MCS_HE_GI_LTF_POS;
+
+ if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
+ he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
+ he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
+			/* the new rate format has an additional bit to
+			 * represent the value 4, rather than using the
+			 * SGI bit for this purpose as the old format did
+			 */
+ he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
+ RATE_MCS_SGI_POS_V1;
+
+ rate_v3 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
+ rate_v3 |= he_type << RATE_MCS_HE_TYPE_POS;
+ rate_v3 |= he_106t << RATE_MCS_HE_106T_POS;
+ rate_v3 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
+ rate_v3 |= RATE_MCS_MOD_TYPE_HE;
+ } else {
+ rate_v3 |= RATE_MCS_MOD_TYPE_VHT;
+ }
+ /* if legacy format */
+ } else {
+ u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
+
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
+ rate_v3 |= legacy_rate;
+ if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
+ rate_v3 |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
+ }
+
+ /* convert flags */
+ if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
+ rate_v3 |= RATE_MCS_LDPC_MSK;
+ rate_v3 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
+ (rate_v1 & RATE_MCS_ANT_AB_MSK) |
+ (rate_v1 & RATE_MCS_STBC_MSK) |
+ (rate_v1 & RATE_MCS_BF_MSK);
+
+ dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
+ if (dup) {
+ rate_v3 |= RATE_MCS_DUP_MSK;
+ rate_v3 |= dup << RATE_MCS_CHAN_WIDTH_POS;
+ }
+
+ if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
+ (rate_v1 & RATE_MCS_SGI_MSK_V1))
+ rate_v3 |= RATE_MCS_SGI_MSK;
+
+ return rate_v3;
+}
+
+__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver)
+{
+ u32 result = 0;
+ int rate_idx;
+
+ if (rate_ver > 1)
+ return iwl_v3_rate_to_v2_v3(rate, rate_ver > 2);
+
+ switch (rate & RATE_MCS_MOD_TYPE_MSK) {
+ case RATE_MCS_MOD_TYPE_CCK:
+ result = RATE_MCS_CCK_MSK_V1;
+ fallthrough;
+ case RATE_MCS_MOD_TYPE_LEGACY_OFDM:
+ rate_idx = u32_get_bits(rate, RATE_LEGACY_RATE_MSK);
+ if (!(result & RATE_MCS_CCK_MSK_V1))
+ rate_idx += IWL_FIRST_OFDM_RATE;
+ result |= u32_encode_bits(iwl_fw_rate_idx_to_plcp(rate_idx),
+ RATE_LEGACY_RATE_MSK_V1);
+ break;
+ case RATE_MCS_MOD_TYPE_HT:
+ result = RATE_MCS_HT_MSK_V1;
+ result |= u32_encode_bits(u32_get_bits(rate,
+ RATE_HT_MCS_CODE_MSK),
+ RATE_HT_MCS_RATE_CODE_MSK_V1);
+ result |= u32_encode_bits(u32_get_bits(rate,
+ RATE_MCS_NSS_MSK),
+ RATE_HT_MCS_MIMO2_MSK);
+ break;
+ case RATE_MCS_MOD_TYPE_VHT:
+ result = RATE_MCS_VHT_MSK_V1;
+ result |= u32_encode_bits(u32_get_bits(rate,
+ RATE_VHT_MCS_NSS_MSK),
+ RATE_MCS_CODE_MSK);
+ result |= u32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK),
+ RATE_VHT_MCS_NSS_MSK);
+ break;
+ case RATE_MCS_MOD_TYPE_HE: /* not generated */
+ default:
+ WARN_ONCE(1, "bad modulation type %d\n",
+ u32_get_bits(rate, RATE_MCS_MOD_TYPE_MSK));
+ return 0;
+ }
+
+ if (rate & RATE_MCS_LDPC_MSK)
+ result |= RATE_MCS_LDPC_MSK_V1;
+ WARN_ON_ONCE(u32_get_bits(rate, RATE_MCS_CHAN_WIDTH_MSK) >
+ RATE_MCS_CHAN_WIDTH_160_VAL);
+ result |= (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) |
+ (rate & RATE_MCS_ANT_AB_MSK) |
+ (rate & RATE_MCS_STBC_MSK) |
+ (rate & RATE_MCS_BF_MSK);
+
+ /* not handling DUP since we don't use it */
+ WARN_ON_ONCE(rate & RATE_MCS_DUP_MSK);
+
+ if (rate & RATE_MCS_SGI_MSK)
+ result |= RATE_MCS_SGI_MSK_V1;
+
+ return cpu_to_le32(result);
+}
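
Taken together, iwl_mvm_v3_rate_from_fw() and iwl_mvm_v3_rate_to_fw() let the
rest of the driver work with the v3 rate layout regardless of the firmware
generation. A usage sketch (descriptor field names as in the RX path changes
elsewhere in this patch; note that the to-fw direction deliberately warns on
HE and DUP rates, which v1 firmware commands never carry):

	/* normalize a firmware RX rate to v3 for internal use */
	u32 rate_v3 = iwl_mvm_v3_rate_from_fw(desc->v1.rate_n_flags,
					      mvm->fw_rates_ver);

	/* convert back when building a command for the firmware */
	__le32 fw_rate = iwl_mvm_v3_rate_to_fw(rate_v3, mvm->fw_rates_ver);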
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index ea81cb236d5c..69259ebb966b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright (C) 2003 - 2014, 2018 - 2024 Intel Corporation
+ * Copyright (C) 2003 - 2014, 2018 - 2025 Intel Corporation
*****************************************************************************/
#ifndef __rs_h__
@@ -424,6 +424,9 @@ void iwl_mvm_rate_control_unregister(void);
struct iwl_mvm_sta;
+u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver);
+__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver);
+
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 2dbef7b46355..8eb0aa448c85 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -490,8 +490,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (!(rate_n_flags & RATE_MCS_CCK_MSK_V1) &&
rate_n_flags & RATE_MCS_SGI_MSK_V1)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK_V1)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
@@ -1001,7 +999,7 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
sec_link = mvmvif->link[sec_link]->fw_link_id;
/* Sum up RX and TX MPDUs from the different queues/links */
- for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
+ for (int q = 0; q < mvm->trans->info.num_rxqs; q++) {
spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
/* Link IDs that don't exist will contain 0 */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 14ea89f931bb..077aadbf95db 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -171,7 +171,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
shdr->type != htons(ETH_P_PAE) &&
shdr->type != htons(ETH_P_TDLS))))
skb->ip_summed = CHECKSUM_NONE;
- else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ else if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
/* mac80211 assumes full CSUM including SNAP header */
skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
}
@@ -409,7 +409,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
- if (mvm->trans->trans_cfg->gen2 &&
+ if (mvm->trans->mac_cfg->gen2 &&
!(status & RX_MPDU_RES_STATUS_MIC_OK))
stats->flag |= RX_FLAG_MMIC_ERROR;
@@ -426,7 +426,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (pkt_flags & FH_RSCSR_RADA_EN) {
stats->flag |= RX_FLAG_ICV_STRIPPED;
- if (mvm->trans->trans_cfg->gen2)
+ if (mvm->trans->mac_cfg->gen2)
stats->flag |= RX_FLAG_MMIC_STRIPPED;
}
@@ -462,7 +462,7 @@ static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
{
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);
@@ -744,7 +744,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
- if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000)
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000)
return false;
/*
@@ -1944,7 +1944,7 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
}
/* must be before L-SIG data */
- if (format == RATE_MCS_HE_MSK)
+ if (format == RATE_MCS_MOD_TYPE_HE)
iwl_mvm_rx_he(mvm, skb, phy_data, queue);
iwl_mvm_decode_lsig(skb, phy_data);
@@ -1966,45 +1966,45 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
phy_data->energy_a, phy_data->energy_b);
/* using TLV format and must be after all fixed len fields */
- if (format == RATE_MCS_EHT_MSK)
+ if (format == RATE_MCS_MOD_TYPE_EHT)
iwl_mvm_rx_eht(mvm, skb, phy_data, queue);
if (unlikely(mvm->monitor_on))
iwl_mvm_add_rtap_sniffer_config(mvm, skb);
- is_sgi = format == RATE_MCS_HE_MSK ?
+ is_sgi = format == RATE_MCS_MOD_TYPE_HE ?
iwl_he_is_sgi(rate_n_flags) :
rate_n_flags & RATE_MCS_SGI_MSK;
- if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
+ if (!(format == RATE_MCS_MOD_TYPE_CCK) && is_sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
switch (format) {
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rx_status->encoding = RX_ENC_VHT;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
rx_status->encoding = RX_ENC_HE;
rx_status->he_dcm =
!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
break;
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
rx_status->encoding = RX_ENC_EHT;
break;
}
switch (format) {
- case RATE_MCS_HT_MSK:
+ case RATE_MCS_MOD_TYPE_HT:
rx_status->encoding = RX_ENC_HT;
rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
break;
- case RATE_MCS_VHT_MSK:
- case RATE_MCS_HE_MSK:
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
rx_status->nss =
u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
@@ -2048,7 +2048,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
desc_size = sizeof(*desc);
else
desc_size = IWL_RX_DESC_SIZE_V1;
@@ -2058,8 +2058,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
}
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ phy_data.rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(desc->v3.rate_n_flags,
+ mvm->fw_rates_ver);
phy_data.channel = desc->v3.channel;
phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
phy_data.energy_a = desc->v3.energy_a;
@@ -2072,7 +2074,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.eht_d4 = desc->phy_eht_data4;
phy_data.d5 = desc->v3.phy_data5;
} else {
- phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ phy_data.rate_n_flags =
+ iwl_mvm_v3_rate_from_fw(desc->v1.rate_n_flags,
+ mvm->fw_rates_ver);
phy_data.channel = desc->v1.channel;
phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
phy_data.energy_a = desc->v1.energy_a;
@@ -2084,13 +2088,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.d3 = desc->v1.phy_data3;
}
- if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- REPLY_RX_MPDU_CMD, 0) < 4) {
- phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
- IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n",
- phy_data.rate_n_flags);
- }
-
format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
len = le16_to_cpu(desc->mpdu_len);
@@ -2138,14 +2135,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
/* set the preamble flag if appropriate */
- if (format == RATE_MCS_CCK_MSK &&
+ if (format == RATE_MCS_MOD_TYPE_CCK &&
phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
u64 tsf_on_air_rise;
- if (mvm->trans->trans_cfg->device_family >=
+ if (mvm->trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210)
tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
else
@@ -2157,7 +2154,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
- u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
+ u8 band = u8_get_bits(desc->mac_phy_band,
+ IWL_RX_MPDU_MAC_PHY_BAND_BAND_MASK);
rx_status->band = iwl_mvm_nl80211_band_from_phy(band);
} else {
@@ -2303,7 +2301,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- if (mvm->trans->trans_cfg->device_family ==
+ if (mvm->trans->mac_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
iwl_mvm_flip_address(hdr->addr3);
@@ -2349,7 +2347,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) &&
likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) &&
likely(!iwl_mvm_mei_filter_scan(mvm, skb))) {
- if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+ if (mvm->trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
(desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
!(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;
@@ -2383,7 +2381,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.d1 = desc->phy_info[1];
phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
- phy_data.rate_n_flags = le32_to_cpu(desc->rate);
phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
@@ -2391,14 +2388,8 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.rx_vec[0] = desc->rx_vec[0];
phy_data.rx_vec[1] = desc->rx_vec[1];
- if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
- RX_NO_DATA_NOTIF, 0) < 2) {
- IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n",
- phy_data.rate_n_flags);
- phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
- IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n",
- phy_data.rate_n_flags);
- }
+ phy_data.rate_n_flags = iwl_mvm_v3_rate_from_fw(desc->rate,
+ mvm->fw_rates_ver);
format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
@@ -2411,7 +2402,7 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.rx_vec[2] = desc->rx_vec[2];
phy_data.rx_vec[3] = desc->rx_vec[3];
} else {
- if (format == RATE_MCS_EHT_MSK)
+ if (format == RATE_MCS_MOD_TYPE_EHT)
/* no support for EHT before version 3 API */
return;
}
@@ -2472,17 +2463,17 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
* may be up to 8 spatial streams.
*/
switch (format) {
- case RATE_MCS_VHT_MSK:
+ case RATE_MCS_MOD_TYPE_VHT:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
break;
- case RATE_MCS_HE_MSK:
+ case RATE_MCS_MOD_TYPE_HE:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
break;
- case RATE_MCS_EHT_MSK:
+ case RATE_MCS_MOD_TYPE_EHT:
rx_status->nss =
le32_get_bits(desc->rx_vec[2],
RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 78fd7faaed97..8ec4a007b4b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -791,10 +791,10 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
lockdep_assert_held(&mvm->mutex);
- if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
+ if (WARN(maxq >= mvm->trans->mac_cfg->base->num_of_queues,
"max queue %d >= num_of_queues (%d)", maxq,
- mvm->trans->trans_cfg->base_params->num_of_queues))
- maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
+ mvm->trans->mac_cfg->base->num_of_queues))
+ maxq = mvm->trans->mac_cfg->base->num_of_queues - 1;
/* This should not be hit with new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -852,7 +852,7 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
- mvm->trans->cfg->min_txq_size);
+ mvm->trans->mac_cfg->base->min_txq_size);
} else {
size = iwl_mvm_get_queue_size(sta);
}
@@ -1765,7 +1765,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm_sta->deflink.sta_id = sta_id;
rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);
- if (!mvm->trans->trans_cfg->gen2)
+ if (!mvm->trans->mac_cfg->gen2)
mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
LINK_QUAL_AGG_FRAME_LIMIT_DEF;
else
@@ -1798,7 +1798,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (iwl_mvm_has_new_rx_api(mvm)) {
int q;
- dup_data = kcalloc(mvm->trans->num_rx_queues,
+ dup_data = kcalloc(mvm->trans->info.num_rxqs,
sizeof(*dup_data), GFP_KERNEL);
if (!dup_data)
return -ENOMEM;
@@ -1811,7 +1811,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* This thus allows receiving a packet with seqno 0 and the
* retry bit set as the very first packet on a new TID.
*/
- for (q = 0; q < mvm->trans->num_rx_queues; q++)
+ for (q = 0; q < mvm->trans->info.num_rxqs; q++)
memset(dup_data[q].last_seq, 0xff,
sizeof(dup_data[q].last_seq));
mvm_sta->dup_data = dup_data;
@@ -1839,11 +1839,11 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
!sta->tdls && ieee80211_vif_is_mld(vif)) {
mvm_sta->mpdu_counters =
- kcalloc(mvm->trans->num_rx_queues,
+ kcalloc(mvm->trans->info.num_rxqs,
sizeof(*mvm_sta->mpdu_counters),
GFP_KERNEL);
if (mvm_sta->mpdu_counters)
- for (int q = 0; q < mvm->trans->num_rx_queues; q++)
+ for (int q = 0; q < mvm->trans->info.num_rxqs; q++)
spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
}
@@ -2189,7 +2189,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
u8 sta_id, u8 fifo)
{
unsigned int wdg_timeout =
- mvm->trans->trans_cfg->base_params->wd_timeout;
+ mvm->trans->mac_cfg->base->wd_timeout;
struct iwl_trans_txq_scd_cfg cfg = {
.fifo = fifo,
.sta_id = sta_id,
@@ -2206,7 +2206,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
unsigned int wdg_timeout =
- mvm->trans->trans_cfg->base_params->wd_timeout;
+ mvm->trans->mac_cfg->base->wd_timeout;
WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
@@ -2717,7 +2717,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
- for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ for (i = 0; i < mvm->trans->info.num_rxqs; i++) {
int j;
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
@@ -2750,7 +2750,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
{
int i;
- for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ for (i = 0; i < mvm->trans->info.num_rxqs; i++) {
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
struct iwl_mvm_reorder_buf_entry *entries =
@@ -2925,7 +2925,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* before starting the BA session in the firmware
*/
baid_data = kzalloc(sizeof(*baid_data) +
- mvm->trans->num_rx_queues *
+ mvm->trans->info.num_rxqs *
reorder_buf_size,
GFP_KERNEL);
if (!baid_data)
@@ -3177,7 +3177,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* to align the wrap around of ssn so we compare relevant values.
*/
normalized_ssn = tid_data->ssn;
- if (mvm->trans->trans_cfg->gen2)
+ if (mvm->trans->mac_cfg->gen2)
normalized_ssn &= 0xff;
if (normalized_ssn == tid_data->next_reclaimed) {
@@ -4305,7 +4305,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
* In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
* to align the wrap around of ssn so we compare relevant values.
*/
- if (mvm->trans->trans_cfg->gen2)
+ if (mvm->trans->mac_cfg->gen2)
sn &= 0xff;
return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 19c905b641e2..6b183f5e9bbc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -225,8 +225,6 @@ struct iwl_mvm_vif;
* @IWL_AGG_ON: aggregation session is up
* @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
* HW queue to be empty from packets for this RA /TID.
- * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
- * HW queue to be empty from packets for this RA /TID.
*/
enum iwl_mvm_agg_state {
IWL_AGG_OFF = 0,
@@ -234,7 +232,6 @@ enum iwl_mvm_agg_state {
IWL_AGG_STARTING,
IWL_AGG_ON,
IWL_EMPTYING_HW_QUEUE_ADDBA,
- IWL_EMPTYING_HW_QUEUE_DELBA,
};
/**
@@ -262,7 +259,7 @@ struct iwl_mvm_tid_data {
u16 seq_number;
u16 next_reclaimed;
/* The rest is Tx AGG related */
- u32 rate_n_flags;
+ __le32 rate_n_flags;
u8 lq_color;
bool amsdu_in_ampdu_allowed;
enum iwl_mvm_agg_state state;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
index 6bd56a28cffd..895d53f223e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
@@ -1,3 +1,3 @@
-iwlmvm-tests-y += module.o links.o scan.o
+iwlmvm-tests-y += module.o links.o scan.o hcmd.o
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c
new file mode 100644
index 000000000000..1fee0320c756
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for host command (hcmd) name ordering
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/test.h>
+
+#include <iwl-trans.h>
+#include "../mvm.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static void test_hcmd_names_sorted(struct kunit *test)
+{
+ for (int i = 0; i < iwl_mvm_groups_size; i++) {
+ const struct iwl_hcmd_arr *arr = &iwl_mvm_groups[i];
+
+ if (!arr->arr)
+ continue;
+
+ for (int j = 0; j < arr->size - 1; j++)
+ KUNIT_EXPECT_LE(test, arr->arr[j].cmd_id,
+ arr->arr[j + 1].cmd_id);
+ }
+}
+
+static struct kunit_case hcmd_names_cases[] = {
+ KUNIT_CASE(test_hcmd_names_sorted),
+ {},
+};
+
+static struct kunit_suite hcmd_names = {
+ .name = "iwlmvm-hcmd-names",
+ .test_cases = hcmd_names_cases,
+};
+
+kunit_test_suite(hcmd_names);
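
The ordering this test asserts is what lets the transport look up command
names with a binary search. A minimal illustration of the invariant, reusing
the driver's HCMD_NAME() helper (cmd_id values shown for illustration):

	/* each group's array must be sorted by cmd_id, e.g.: */
	static const struct iwl_hcmd_names demo_names[] = {
		HCMD_NAME(ALIVE),	/* 0x01 */
		HCMD_NAME(REPLY_ERROR),	/* 0x02 */
		HCMD_NAME(TX_CMD),	/* 0x1c -- must follow lower ids */
	};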
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 1a30bb1ff8ca..478408f802d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -771,15 +771,17 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
{
- struct iwl_roc_req_v5 roc_cmd = {
+ struct iwl_roc_req roc_cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.activity = cpu_to_le32(activity),
};
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(roc_cmd);
int ret;
lockdep_assert_held(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0,
- sizeof(roc_cmd), &roc_cmd);
+ cmd_len, &roc_cmd);
if (ret)
IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret);
}
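
The length selection above follows the usual iwlwifi pattern for growing a
host command: newer struct versions only append fields, so firmware that
advertises an older command version is simply sent the leading, older-sized
portion of the same buffer. A generic sketch with hypothetical structs:

	struct demo_cmd_v5 { __le32 action, activity; };
	struct demo_cmd_v6 { __le32 action, activity, extra; }; /* appends only */

	u16 len = fw_ver < 6 ? sizeof(struct demo_cmd_v5)
			     : sizeof(struct demo_cmd_v6);
	/* older firmware parses just the v5-sized prefix of the v6 buffer */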
@@ -1102,11 +1104,13 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
{
int res;
u32 duration_tu, delay;
- struct iwl_roc_req_v5 roc_req = {
+ struct iwl_roc_req roc_req = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.activity = cpu_to_le32(activity),
.sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
};
+ u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(roc_req);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
@@ -1136,7 +1140,7 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- 0, sizeof(roc_req), &roc_req);
+ 0, cmd_len, &roc_req);
if (!res)
mvmvif->roc_activity = activity;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index c851290e75a2..53bab21ebae2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -8,6 +8,9 @@
#include "mvm.h"
+#define IWL_MVM_NUM_CTDP_STEPS 20
+#define IWL_MVM_MIN_CTDP_BUDGET_MW 150
+
#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
@@ -479,43 +482,28 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
.support_tx_backoff = true,
};
-/* budget in mWatt */
-static const u32 iwl_mvm_cdev_budgets[] = {
- 2400, /* cooling state 0 */
- 2000, /* cooling state 1 */
- 1800, /* cooling state 2 */
- 1600, /* cooling state 3 */
- 1400, /* cooling state 4 */
- 1200, /* cooling state 5 */
- 1000, /* cooling state 6 */
- 900, /* cooling state 7 */
- 800, /* cooling state 8 */
- 700, /* cooling state 9 */
- 650, /* cooling state 10 */
- 600, /* cooling state 11 */
- 550, /* cooling state 12 */
- 500, /* cooling state 13 */
- 450, /* cooling state 14 */
- 400, /* cooling state 15 */
- 350, /* cooling state 16 */
- 300, /* cooling state 17 */
- 250, /* cooling state 18 */
- 200, /* cooling state 19 */
- 150, /* cooling state 20 */
-};
-
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
{
struct iwl_ctdp_cmd cmd = {
.operation = cpu_to_le32(op),
- .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]),
.window_size = 0,
};
+ u32 budget;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
+	/* Scale linearly from IWL_MVM_MIN_CTDP_BUDGET_MW up to the configured
+	 * maximum across the predefined number of steps.
+	 */
+ budget = ((mvm->thermal_throttle.power_budget_mw -
+ IWL_MVM_MIN_CTDP_BUDGET_MW) *
+ (IWL_MVM_NUM_CTDP_STEPS - 1 - state)) /
+ (IWL_MVM_NUM_CTDP_STEPS - 1) +
+ IWL_MVM_MIN_CTDP_BUDGET_MW;
+ cmd.budget = cpu_to_le32(budget);
+
status = 0;
ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
CTDP_CONFIG_CMD),
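
The interpolation above replaces the removed fixed table (21 entries, states
0..20) with 20 evenly spaced steps between the per-RF-type maximum and
IWL_MVM_MIN_CTDP_BUDGET_MW. Worked values for a sketch helper, assuming a
2400 mW budget (the HR default introduced later in this patch):

	static u32 demo_ctdp_budget_mw(u32 max_mw, u32 state)
	{
		return (max_mw - 150) * (19 - state) / 19 + 150;
	}

	/* demo_ctdp_budget_mw(2400,  0) == 2400 mW */
	/* demo_ctdp_budget_mw(2400, 10) == 1215 mW */
	/* demo_ctdp_budget_mw(2400, 19) ==  150 mW */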
@@ -552,8 +540,8 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
#ifdef CONFIG_THERMAL
static int compare_temps(const void *a, const void *b)
{
- return ((s16)le16_to_cpu(*(__le16 *)a) -
- (s16)le16_to_cpu(*(__le16 *)b));
+ return ((s16)le16_to_cpu(*(const __le16 *)a) -
+ (s16)le16_to_cpu(*(const __le16 *)b));
}
struct iwl_trip_walk_data {
@@ -707,7 +695,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1;
+ *state = IWL_MVM_NUM_CTDP_STEPS - 1;
return 0;
}
@@ -733,7 +721,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
return -EIO;
- if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets))
+ if (new_state >= IWL_MVM_NUM_CTDP_STEPS)
return -EINVAL;
return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
@@ -794,6 +782,47 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
}
#endif /* CONFIG_THERMAL */
+static u32 iwl_mvm_ctdp_get_max_budget(struct iwl_mvm *mvm)
+{
+ u64 bios_power_budget = 0;
+ u32 default_power_budget;
+
+ switch (CSR_HW_RFID_TYPE(mvm->trans->info.hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_JF2:
+ case IWL_CFG_RF_TYPE_JF1:
+ default_power_budget = 2000;
+ break;
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_HR1:
+ default_power_budget = 2400;
+ break;
+ case IWL_CFG_RF_TYPE_GF:
+ /* dual-radio devices have a higher budget */
+ if (CSR_HW_RFID_IS_CDB(mvm->trans->info.hw_rf_id))
+ default_power_budget = 5200;
+ else
+ default_power_budget = 2880;
+ break;
+ case IWL_CFG_RF_TYPE_FM:
+ default_power_budget = 3450;
+ break;
+ default:
+ default_power_budget = 5550;
+ break;
+ }
+
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &bios_power_budget);
+
+ /* 32bit in UEFI, 16bit in ACPI; use BIOS value if it is in range */
+ if (bios_power_budget &&
+ bios_power_budget != 0xffff && bios_power_budget != 0xffffffff &&
+ bios_power_budget >= IWL_MVM_MIN_CTDP_BUDGET_MW &&
+ bios_power_budget <= default_power_budget)
+ return (u32)bios_power_budget;
+
+ return default_power_budget;
+}
+
void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
@@ -805,6 +834,8 @@ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
else
tt->params = iwl_mvm_default_tt_params;
+ tt->power_budget_mw = iwl_mvm_ctdp_get_max_budget(mvm);
+ IWL_DEBUG_TEMP(mvm, "cTDP power budget: %d mW\n", tt->power_budget_mw);
tt->throttle = false;
tt->dynamic_smps = false;
tt->min_backoff = min_backoff;
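
The acceptance test in iwl_mvm_ctdp_get_max_budget() treats 0xffff (ACPI) and 0xffffffff (UEFI) as unset sentinels and only honours a BIOS value between the 150 mW floor and the per-RF default. A userspace restatement of that test, with the constants taken from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define MIN_CTDP_BUDGET_MW 150

/* 0xffff / 0xffffffff are the "unset" sentinels for ACPI / UEFI */
static uint32_t pick_budget(uint64_t bios_mw, uint32_t default_mw)
{
	if (bios_mw && bios_mw != 0xffff && bios_mw != 0xffffffff &&
	    bios_mw >= MIN_CTDP_BUDGET_MW && bios_mw <= default_mw)
		return (uint32_t)bios_mw;
	return default_mw;
}

int main(void)
{
	/* sentinel and out-of-range values fall back to the default:
	 * prints 2880 2880 2000
	 */
	printf("%u %u %u\n", pick_budget(0xffff, 2880),
	       pick_budget(100, 2880), pick_budget(2000, 2880));
	return 0;
}
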
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index f67afb66ef2b..ac2cf1b8ce23 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -148,7 +148,7 @@ out:
* Sets most of the Tx cmd's fields
*/
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
+ struct iwl_tx_cmd_v6 *tx_cmd,
struct ieee80211_tx_info *info, u8 sta_id)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -283,14 +283,10 @@ static u32 iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm,
(rate_idx <= IWL_LAST_CCK_RATE);
/* Set CCK or OFDM flag */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
- if (!is_cck)
- rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
- else
- rate_flags |= RATE_MCS_CCK_MSK;
- } else if (is_cck) {
- rate_flags |= RATE_MCS_CCK_MSK_V1;
- }
+ if (!is_cck)
+ rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
+ else
+ rate_flags |= RATE_MCS_MOD_TYPE_CCK;
return (u32)rate_plcp | rate_flags;
}
@@ -303,45 +299,35 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_rate *rate = &info->control.rates[0];
u32 result;
- /*
- * we only care about legacy/HT/VHT so far, so we can
- * build in v1 and use iwl_new_rate_from_v1()
- */
-
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
u8 mcs = ieee80211_rate_get_vht_mcs(rate);
u8 nss = ieee80211_rate_get_vht_nss(rate);
- result = RATE_MCS_VHT_MSK_V1;
+ result = RATE_MCS_MOD_TYPE_VHT;
result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- result |= RATE_MCS_SGI_MSK_V1;
+ result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_40;
else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_80;
else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
- result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
- result = iwl_new_rate_from_v1(result);
+ result |= RATE_MCS_CHAN_WIDTH_160;
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- result = RATE_MCS_HT_MSK_V1;
- result |= u32_encode_bits(rate->idx,
- RATE_HT_MCS_RATE_CODE_MSK_V1 |
- RATE_HT_MCS_NSS_MSK_V1);
+ result = RATE_MCS_MOD_TYPE_HT;
+ result |= u32_encode_bits(rate->idx & 0x7,
+ RATE_HT_MCS_CODE_MSK);
+ result |= u32_encode_bits(rate->idx >> 3,
+ RATE_MCS_NSS_MSK);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- result |= RATE_MCS_SGI_MSK_V1;
+ result |= RATE_MCS_SGI_MSK;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ result |= RATE_MCS_CHAN_WIDTH_40;
if (info->flags & IEEE80211_TX_CTL_LDPC)
- result |= RATE_MCS_LDPC_MSK_V1;
+ result |= RATE_MCS_LDPC_MSK;
if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
result |= RATE_MCS_STBC_MSK;
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
- result = iwl_new_rate_from_v1(result);
} else {
int rate_idx = info->control.rates[0].idx;
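
In the new rate format the injected HT rate is no longer a single HT MCS field: the mac80211 index is split into a 3-bit per-stream MCS code (idx & 0x7) and a stream field (idx >> 3, i.e. NSS minus one, matching the +1 applied on decode later in this patch). For example MCS 11, a two-stream rate, yields code 3 and stream field 1:

#include <stdio.h>

int main(void)
{
	unsigned int mcs = 11; /* mac80211 HT MCS 8-15 = two streams */

	/* low 3 bits: per-stream MCS code; high bits: NSS - 1 */
	printf("code=%u streams=%u\n", mcs & 0x7, (mcs >> 3) + 1);
	return 0;
}
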
@@ -391,21 +377,25 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
return iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
}
-static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta, __le16 fc)
+static __le32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
{
+ u32 rate;
+
if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
- return iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc);
+ rate = iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc);
+ else
+ rate = iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
+ iwl_mvm_get_tx_ant(mvm, info, sta, fc);
- return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
- iwl_mvm_get_tx_ant(mvm, info, sta, fc);
+ return iwl_mvm_v3_rate_to_fw(rate, mvm->fw_rates_ver);
}
/*
* Sets the fields in the Tx cmd that are rate related
*/
-void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
@@ -443,8 +433,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
}
/* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags =
- cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
+ tx_cmd->rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc);
}
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@@ -469,7 +458,7 @@ static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
*/
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
+ struct iwl_tx_cmd_v6 *tx_cmd,
struct sk_buff *skb_frag,
int hdrlen)
{
@@ -543,7 +532,7 @@ static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
* (since we don't necessarily know the link), but FW rate
* selection was fixed.
*/
- return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
+ return mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
}
static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
@@ -567,7 +556,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_device_tx_cmd *dev_cmd;
- struct iwl_tx_cmd *tx_cmd;
+ struct iwl_tx_cmd_v6 *tx_cmd;
dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -577,7 +566,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
- u32 rate_n_flags = 0;
+ __le32 rate_n_flags = 0;
u16 flags = 0;
struct iwl_mvm_sta *mvmsta = sta ?
iwl_mvm_sta_from_mac80211(sta) : NULL;
@@ -609,9 +598,9 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_HIGH_PRI;
}
- if (mvm->trans->trans_cfg->device_family >=
+ if (mvm->trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd *cmd = (void *)dev_cmd->payload;
u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
info, amsdu);
@@ -624,9 +613,9 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le16(flags);
- cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ cmd->rate_n_flags = rate_n_flags;
} else {
- struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v9 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = iwl_mvm_tx_csum(mvm, skb,
info, amsdu);
@@ -639,12 +628,12 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le32(flags);
- cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+ cmd->rate_n_flags = rate_n_flags;
}
goto out;
}
- tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
if (info->control.hw_key)
iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
@@ -1023,7 +1012,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* 1 more for the potential data in the header
*/
if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
- mvm->trans->max_skb_frags)
+ mvm->trans->info.max_skb_frags)
num_subframes = 1;
if (num_subframes > 1)
@@ -1185,7 +1174,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
seq_number &= IEEE80211_SCTL_SEQ;
if (!iwl_mvm_has_new_tx_api(mvm)) {
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
@@ -1372,8 +1361,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
- if ((tid_data->state == IWL_AGG_ON ||
- tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+ if (tid_data->state == IWL_AGG_ON &&
iwl_mvm_tid_queued(mvm, tid_data) == 0) {
/*
* Now that this aggregation or DQA queue is empty tell
@@ -1388,7 +1376,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
* to align the wrap around of ssn so we compare relevant values.
*/
normalized_ssn = tid_data->ssn;
- if (mvm->trans->trans_cfg->gen2)
+ if (mvm->trans->mac_cfg->gen2)
normalized_ssn &= 0xff;
if (normalized_ssn != tid_data->next_reclaimed)
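
On gen2 hardware only the low 8 bits of the SSN are tracked, so the comparison above is effectively modulo 256; e.g. a hardware ssn of 0x1ff and a next_reclaimed of 0xff agree after masking. A trivial sketch:

#include <stdio.h>

int main(void)
{
	unsigned int ssn = 0x1ff, next_reclaimed = 0xff;

	/* gen2 tracks only 8 SSN bits, so mask before comparing */
	ssn &= 0xff;
	printf("%s\n", ssn == next_reclaimed ? "drained" : "pending");
	return 0;
}
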
@@ -1402,15 +1390,6 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
tid_data->state = IWL_AGG_STARTING;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
-
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- IWL_DEBUG_TX_QUEUES(mvm,
- "Can continue DELBA flow ssn = next_recl = %d\n",
- tid_data->next_reclaimed);
- tid_data->state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
-
default:
break;
}
@@ -1477,7 +1456,7 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
struct ieee80211_tx_rate *r)
{
u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
- u32 rate = format == RATE_MCS_HT_MSK ?
+ u32 rate = format == RATE_MCS_MOD_TYPE_HT ?
RATE_HT_MCS_INDEX(rate_n_flags) :
rate_n_flags & RATE_MCS_CODE_MSK;
@@ -1487,68 +1466,51 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
if (rate_n_flags & RATE_MCS_SGI_MSK)
r->flags |= IEEE80211_TX_RC_SHORT_GI;
- if (format == RATE_MCS_HT_MSK) {
+ switch (format) {
+ case RATE_MCS_MOD_TYPE_HT:
r->flags |= IEEE80211_TX_RC_MCS;
r->idx = rate;
- } else if (format == RATE_MCS_VHT_MSK) {
+ break;
+ case RATE_MCS_MOD_TYPE_VHT:
ieee80211_rate_set_vht(r, rate,
FIELD_GET(RATE_MCS_NSS_MSK,
rate_n_flags) + 1);
r->flags |= IEEE80211_TX_RC_VHT_MCS;
- } else if (format == RATE_MCS_HE_MSK) {
+ break;
+ case RATE_MCS_MOD_TYPE_HE:
+ case RATE_MCS_MOD_TYPE_EHT:
/* mac80211 cannot do this without ieee80211_tx_status_ext()
* but it only matters for radiotap */
r->idx = 0;
- } else {
+ break;
+ default:
r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
band);
}
}
-void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
- enum nl80211_band band,
- struct ieee80211_tx_rate *r)
-{
- if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
-
- r->flags |=
- iwl_mvm_get_hwrate_chan_width(rate_n_flags &
- RATE_MCS_CHAN_WIDTH_MSK_V1);
-
- if (rate_n_flags & RATE_MCS_SGI_MSK_V1)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
- r->flags |= IEEE80211_TX_RC_MCS;
- r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
- ieee80211_rate_set_vht(
- r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
- FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1);
- r->flags |= IEEE80211_TX_RC_VHT_MCS;
- } else {
- r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- band);
- }
-}
-
/*
* translate ucode response to mac80211 tx status control values
*/
-static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
- u32 rate_n_flags,
+static void iwl_mvm_hwrate_to_tx_status(struct iwl_mvm *mvm,
+ __le32 rate_n_flags,
struct ieee80211_tx_info *info)
{
struct ieee80211_tx_rate *r = &info->status.rates[0];
+ u32 rate;
- if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
- TX_CMD, 0) <= 6)
- rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ /*
+ * Technically this conversion is incorrect for BA status, however:
+ * - we only use the BA notif data for older firmware that has
+ * host rate scaling and doesn't use the newer rate formats
+ * - the firmware API for the BA notif and the TX CMD changed
+ * together as well
+ */
+ rate = iwl_mvm_v3_rate_from_fw(rate_n_flags, mvm->fw_rates_ver);
info->status.antenna =
- ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
- iwl_mvm_hwrate_to_tx_rate(rate_n_flags,
- info->band, r);
+ ((rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
+ iwl_mvm_hwrate_to_tx_rate(rate, info->band, r);
}
static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
@@ -1613,7 +1575,7 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
tx_resp->frame_count);
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return val & 0xFFFF;
return val & 0xFFF;
}
@@ -1700,9 +1662,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
info->status.rates[0].count = tx_resp->failure_frame + 1;
- iwl_mvm_hwrate_to_tx_status(mvm->fw,
- le32_to_cpu(tx_resp->initial_rate),
- info);
+ iwl_mvm_hwrate_to_tx_status(mvm, tx_resp->initial_rate, info);
/* Don't assign the converted initial_rate, because driver
* TLC uses this and doesn't support the new FW rate
@@ -1944,7 +1904,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
if (!WARN_ON_ONCE(!mvmsta)) {
mvmsta->tid_data[tid].rate_n_flags =
- le32_to_cpu(tx_resp->initial_rate);
+ tx_resp->initial_rate;
mvmsta->tid_data[tid].tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
mvmsta->tid_data[tid].lq_color =
@@ -1969,7 +1929,7 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
int txq, int index,
- struct ieee80211_tx_info *tx_info, u32 rate,
+ struct ieee80211_tx_info *tx_info, __le32 rate,
bool is_flush)
{
struct sk_buff_head reclaimed_skbs;
@@ -2053,7 +2013,9 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
tx_info->status.status_driver_data[0] =
RS_DRV_DATA_PACK(tid_data->lq_color,
tx_info->status.status_driver_data[0]);
- tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
+ /* the value is only consumed for old FW that has v1 rates anyway */
+ tx_info->status.status_driver_data[1] =
+ (void *)(uintptr_t)le32_to_cpu(rate);
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -2072,7 +2034,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
info->flags |= IEEE80211_TX_STAT_AMPDU;
memcpy(&info->status, &tx_info->status,
sizeof(tx_info->status));
- iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info);
+ iwl_mvm_hwrate_to_tx_status(mvm, rate, info);
}
}
@@ -2095,7 +2057,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
goto out;
tx_info->band = chanctx_conf->def.chan->band;
- iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info);
+ iwl_mvm_hwrate_to_tx_status(mvm, rate, tx_info);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
@@ -2184,7 +2146,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
&ba_info,
- le32_to_cpu(ba_res->tx_rate), false);
+ ba_res->tx_rate, false);
}
if (mvmsta) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index dd890dcd1505..62da0132f383 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -142,7 +142,7 @@ int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
bool is_LB = band == NL80211_BAND_2GHZ;
- if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
return is_LB ? rate + IWL_FIRST_OFDM_RATE :
rate;
@@ -169,15 +169,9 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
- if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
- /* In the new rate legacy rates are indexed:
- * 0 - 3 for CCK and 0 - 7 for OFDM.
- */
- return (rate_idx >= IWL_FIRST_OFDM_RATE ?
- rate_idx - IWL_FIRST_OFDM_RATE :
- rate_idx);
-
- return iwl_fw_rate_idx_to_plcp(rate_idx);
+ return (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE :
+ rate_idx);
}
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
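
With the pre-v9 TX_CMD path gone, legacy rates are always firmware-indexed 0-3 for CCK and 0-7 for OFDM. Assuming the usual iwlwifi legacy table (CCK 1/2/5.5/11 Mbps in indices 0-3, so IWL_FIRST_OFDM_RATE is 4), mac80211 index 6 (12 Mbps) becomes OFDM index 2, while a CCK index passes through unchanged:

#include <stdio.h>

#define FIRST_OFDM_RATE 4 /* assumed: CCK occupies indices 0-3 */

static unsigned int idx_to_hwrate(unsigned int rate_idx)
{
	return rate_idx >= FIRST_OFDM_RATE ? rate_idx - FIRST_OFDM_RATE
					   : rate_idx;
}

int main(void)
{
	/* prints 2 2: OFDM index 2 (12 Mbps), CCK index 2 (5.5 Mbps) */
	printf("%u %u\n", idx_to_hwrate(6), idx_to_hwrate(2));
	return 0;
}
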
@@ -748,7 +742,7 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
unsigned int default_timeout =
- mvm->trans->trans_cfg->base_params->wd_timeout;
+ mvm->trans->mac_cfg->base->wd_timeout;
/*
* We can't know when the station is asleep or awake, so we
@@ -1187,9 +1181,9 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
{
u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
- if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
- mvm->trans->cfg->gp2_reg_addr)
- reg_addr = mvm->trans->cfg->gp2_reg_addr;
+ if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
+ mvm->trans->mac_cfg->base->gp2_reg_addr)
+ reg_addr = mvm->trans->mac_cfg->base->gp2_reg_addr;
return iwl_read_prph(mvm->trans, reg_addr);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
index 8aa7c455bdee..976fd1f58da4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
@@ -5,7 +5,7 @@
#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-fh.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
#include "internal.h"
#include "iwl-prph.h"
@@ -97,11 +97,12 @@ out:
*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}
-int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
- const struct fw_img *fw)
+int iwl_pcie_ctxt_info_v2_alloc(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ const struct fw_img *img)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_context_info_v2 *ctxt_info_v2;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
struct iwl_prph_info *prph_info;
@@ -109,9 +110,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
u32 control_flags_ext = 0;
int ret;
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->cfg->min_txq_size);
+ trans->mac_cfg->base->min_txq_size);
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_DEF:
return -EINVAL;
case IWL_AMSDU_2K:
@@ -131,13 +132,13 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
break;
}
- if (trans->dsbr_urm_fw_dependent)
+ if (trans->conf.dsbr_urm_fw_dependent)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
- if (trans->dsbr_urm_permanent)
+ if (trans->conf.dsbr_urm_permanent)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
- if (trans->ext_32khz_clock_valid)
+ if (trans->conf.ext_32khz_clock_valid)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;
/* Allocate prph scratch */
@@ -151,16 +152,16 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->version.version = 0;
prph_sc_ctrl->version.mac_id =
- cpu_to_le16((u16)trans->hw_rev);
+ cpu_to_le16((u16)trans->info.hw_rev);
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
- if (trans->trans_cfg->imr_enabled)
+ if (trans->mac_cfg->imr_enabled)
control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
- if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ if (CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iwl_is_force_scu_active_approved()) {
control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
IWL_DEBUG_FW(trans,
@@ -168,6 +169,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
}
+ if (trans->do_top_reset) {
+ WARN_ON(trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC);
+ control_flags |= IWL_PRPH_SCRATCH_TOP_RESET;
+ }
+
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
@@ -178,15 +184,16 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->control.control_flags_ext = cpu_to_le32(control_flags_ext);
/* initialize the Step equalizer data */
- prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
- prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);
+ prph_sc_ctrl->step_cfg.mbx_addr_0 =
+ cpu_to_le32(trans->conf.mbx_addr_0_step);
+ prph_sc_ctrl->step_cfg.mbx_addr_1 =
+ cpu_to_le32(trans->conf.mbx_addr_1_step);
/* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+ ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common);
if (ret)
goto err_free_prph_scratch;
-
/* Allocate prph information
* currently we don't assign anything to the prph info, but it would get
* assigned later
@@ -206,42 +213,58 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
}
/* Allocate context info */
- ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
- sizeof(*ctxt_info_gen3),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
- if (!ctxt_info_gen3) {
+ ctxt_info_v2 = dma_alloc_coherent(trans->dev,
+ sizeof(*ctxt_info_v2),
+ &trans_pcie->ctxt_info_dma_addr,
+ GFP_KERNEL);
+ if (!ctxt_info_v2) {
ret = -ENOMEM;
goto err_free_prph_info;
}
- ctxt_info_gen3->prph_info_base_addr =
+ ctxt_info_v2->prph_info_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr);
- ctxt_info_gen3->prph_scratch_base_addr =
+ ctxt_info_v2->prph_scratch_base_addr =
cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
- ctxt_info_gen3->prph_scratch_size =
- cpu_to_le32(sizeof(*prph_scratch));
- ctxt_info_gen3->cr_head_idx_arr_base_addr =
+
+ /*
+ * This code assumes the FSEQ is last and we can make that
+ * optional; old devices _should_ be fine with a bigger size,
+ * but in simulation we check the size more precisely.
+ */
+ BUILD_BUG_ON(offsetofend(typeof(*prph_scratch), dram.common) +
+ sizeof(prph_scratch->dram.fseq_img) !=
+ sizeof(*prph_scratch));
+ if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ)
+ ctxt_info_v2->prph_scratch_size =
+ cpu_to_le32(sizeof(*prph_scratch));
+ else
+ ctxt_info_v2->prph_scratch_size =
+ cpu_to_le32(offsetofend(typeof(*prph_scratch),
+ dram.common));
+
+ ctxt_info_v2->cr_head_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
- ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+ ctxt_info_v2->tr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
- ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+ ctxt_info_v2->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
- ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
- ctxt_info_gen3->mcr_base_addr =
+ ctxt_info_v2->mtr_base_addr =
+ cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
+ ctxt_info_v2->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
- ctxt_info_gen3->mtr_size =
+ ctxt_info_v2->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
- ctxt_info_gen3->mcr_size =
- cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));
+ ctxt_info_v2->mcr_size =
+ cpu_to_le16(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)));
- trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+ trans_pcie->ctxt_info_v2 = ctxt_info_v2;
trans_pcie->prph_info = prph_info;
trans_pcie->prph_scratch = prph_scratch;
/* Allocate IML */
- trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
+ trans_pcie->iml_len = fw->iml_len;
+ trans_pcie->iml = dma_alloc_coherent(trans->dev, fw->iml_len,
&trans_pcie->iml_dma_addr,
GFP_KERNEL);
if (!trans_pcie->iml) {
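
The prph_scratch_size trimming above relies on offsetofend() returning the offset just past a member, so the optional FSEQ tail can be excluded when IWL_PRPH_SCRATCH_EXT_EXT_FSEQ is not set. A self-contained sketch of the same sizing decision (illustrative struct layout; the macro matches the kernel's definition up to the sizeof_field spelling):

#include <stddef.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct scratch {
	char common[32];   /* always sent to the device */
	char fseq_img[16]; /* optional trailing FSEQ image */
};

int main(void)
{
	int ext_fseq = 0; /* as if IWL_PRPH_SCRATCH_EXT_EXT_FSEQ is unset */
	size_t size = ext_fseq ? sizeof(struct scratch)
			       : offsetofend(struct scratch, common);

	printf("%zu\n", size); /* 32: FSEQ tail excluded */
	return 0;
}
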
@@ -249,27 +272,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
goto err_free_ctxt_info;
}
- memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
-
- iwl_enable_fw_load_int_ctx_info(trans);
-
- /* kick FW self load */
- iwl_write64(trans, CSR_CTXT_INFO_ADDR,
- trans_pcie->ctxt_info_dma_addr);
- iwl_write64(trans, CSR_IML_DATA_ADDR,
- trans_pcie->iml_dma_addr);
- iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
-
- iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
- CSR_AUTO_FUNC_BOOT_ENA);
+ memcpy(trans_pcie->iml, fw->iml, fw->iml_len);
return 0;
err_free_ctxt_info:
- dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
- trans_pcie->ctxt_info_gen3,
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2),
+ trans_pcie->ctxt_info_v2,
trans_pcie->ctxt_info_dma_addr);
- trans_pcie->ctxt_info_gen3 = NULL;
+ trans_pcie->ctxt_info_v2 = NULL;
err_free_prph_info:
dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
trans_pcie->prph_info_dma_addr);
@@ -283,14 +294,31 @@ err_free_prph_scratch:
}
-void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
+void iwl_pcie_ctxt_info_v2_kick(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_enable_fw_load_int_ctx_info(trans, trans->do_top_reset);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr);
+ iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr);
+ iwl_write32(trans, CSR_IML_SIZE_ADDR, trans_pcie->iml_len);
+
+ iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
+ CSR_AUTO_FUNC_BOOT_ENA);
+}
+
+void iwl_pcie_ctxt_info_v2_free(struct iwl_trans *trans, bool alive)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (trans_pcie->iml) {
- dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
+ dma_free_coherent(trans->dev, trans_pcie->iml_len,
+ trans_pcie->iml,
trans_pcie->iml_dma_addr);
trans_pcie->iml_dma_addr = 0;
+ trans_pcie->iml_len = 0;
trans_pcie->iml = NULL;
}
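
This hunk splits the old init into two phases: iwl_pcie_ctxt_info_v2_alloc() builds the DMA structures while iwl_pcie_ctxt_info_v2_kick() later performs the CSR writes that start the firmware self-load. A hedged sketch of the resulting call order in a start-fw path (error handling elided; the real sequencing lives in the PCIe transport code):

/* Sketch only; between alloc and kick the caller may still adjust
 * the context info (e.g. for TOP reset) before the firmware boots.
 */
static int start_fw_sketch(struct iwl_trans *trans,
			   const struct iwl_fw *fw,
			   const struct fw_img *img)
{
	int ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img);

	if (ret)
		return ret;

	/* writes CSR_CTXT_INFO_ADDR/CSR_IML_* and sets the boot bit */
	iwl_pcie_ctxt_info_v2_kick(trans);
	return 0;
}
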
@@ -299,15 +327,15 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
if (alive)
return;
- if (!trans_pcie->ctxt_info_gen3)
+ if (!trans_pcie->ctxt_info_v2)
return;
- /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
- dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
- trans_pcie->ctxt_info_gen3,
+ /* ctxt_info_v2 and prph_scratch are still needed for PNVM load */
+ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_v2),
+ trans_pcie->ctxt_info_v2,
trans_pcie->ctxt_info_dma_addr);
trans_pcie->ctxt_info_dma_addr = 0;
- trans_pcie->ctxt_info_gen3 = NULL;
+ trans_pcie->ctxt_info_v2 = NULL;
dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
trans_pcie->prph_scratch,
@@ -322,9 +350,9 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
trans_pcie->prph_info = NULL;
}
-static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_data,
- struct iwl_dram_data *dram)
+static int iwl_pcie_load_payloads_contig(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ struct iwl_dram_data *dram)
{
u32 len, len0, len1;
@@ -411,9 +439,9 @@ static int iwl_pcie_load_payloads_segments
}
-int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa)
+int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_payloads,
+ const struct iwl_ucode_capabilities *capa)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
@@ -428,7 +456,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
return -EBUSY;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return 0;
if (!pnvm_payloads->n_chunks) {
@@ -445,10 +473,8 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
trans->pnvm_loaded = true;
} else {
/* save only in one DRAM section */
- ret = iwl_pcie_load_payloads_continuously
- (trans,
- pnvm_payloads,
- &dram_regions->drams[0]);
+ ret = iwl_pcie_load_payloads_contig(trans, pnvm_payloads,
+ &dram_regions->drams[0]);
if (!ret) {
dram_regions->n_regions = 1;
trans->pnvm_loaded = true;
@@ -483,7 +509,7 @@ static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
cpu_to_le32(iwl_dram_regions_size(dram_regions));
}
-static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
+static void iwl_pcie_set_contig_pnvm(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
@@ -495,21 +521,21 @@ static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}
-void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
+void iwl_trans_pcie_ctx_info_v2_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
{
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
iwl_pcie_set_pnvm_segments(trans);
else
- iwl_pcie_set_continuous_pnvm(trans);
+ iwl_pcie_set_contig_pnvm(trans);
}
-int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa)
+int iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
@@ -521,7 +547,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
if (trans->reduce_power_loaded)
return 0;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return 0;
if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
@@ -541,10 +567,8 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
trans->reduce_power_loaded = true;
} else {
/* save only in one DRAM section */
- ret = iwl_pcie_load_payloads_continuously
- (trans,
- payloads,
- &dram_regions->drams[0]);
+ ret = iwl_pcie_load_payloads_contig(trans, payloads,
+ &dram_regions->drams[0]);
if (!ret) {
dram_regions->n_regions = 1;
trans->reduce_power_loaded = true;
@@ -567,7 +591,7 @@ static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
cpu_to_le32(iwl_dram_regions_size(dram_regions));
}
-static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
+static void iwl_pcie_set_contig_reduce_power(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
@@ -580,15 +604,15 @@ static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
}
void
-iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
+iwl_trans_pcie_ctx_info_v2_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
{
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
iwl_pcie_set_reduce_power_segments(trans);
else
- iwl_pcie_set_continuous_reduce_power(trans);
+ iwl_pcie_set_contig_reduce_power(trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 344e4d5a1c6e..cb36baac14da 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -83,7 +83,7 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
- struct iwl_context_info_dram *ctxt_dram)
+ struct iwl_context_info_dram_nonfseq *ctxt_dram)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
@@ -161,7 +161,7 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
}
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
- const struct fw_img *fw)
+ const struct fw_img *img)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_context_info *ctxt_info;
@@ -180,11 +180,11 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
- cpu_to_le16((u16)trans->hw_rev);
+ cpu_to_le16((u16)trans->info.hw_rev);
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_2K:
rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
break;
@@ -202,10 +202,10 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
- WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
+ WARN_ON(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)) > 12);
control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
control_flags |=
- u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
+ u32_encode_bits(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)),
IWL_CTXT_INFO_RB_CB_SIZE);
control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
@@ -218,12 +218,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans->conf.cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
/* allocate ucode sections in dram and set addresses */
- ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
+ ret = iwl_pcie_init_fw_sec(trans, img, &ctxt_info->dram);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
@@ -232,7 +232,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
trans_pcie->ctxt_info = ctxt_info;
- iwl_enable_fw_load_int_ctx_info(trans);
+ iwl_enable_fw_load_int_ctx_info(trans, false);
/* Configure debug, if exists */
if (iwl_pcie_dbg_on(trans))
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 00056e76ea3d..656f8b06c27b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -17,15 +17,13 @@
#include "iwl-prph.h"
#include "internal.h"
-#define TRANS_CFG_MARKER BIT(0)
#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \
struct _struct)
extern int _invalid_type;
-#define _TRANS_CFG_MARKER(cfg) \
- (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \
- TRANS_CFG_MARKER, \
- __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
-#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
+#define _TRANS_CFG_CHECK(cfg) \
+ (__builtin_choose_expr(_IS_A(cfg, iwl_mac_cfg), \
+ 0, _invalid_type))
+#define _ASSIGN_CFG(cfg) (_TRANS_CFG_CHECK(cfg) + (kernel_ulong_t)&(cfg))
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
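
With a single config type left, the old TRANS_CFG_MARKER bit is gone and _TRANS_CFG_CHECK() only enforces, at build time, that every table entry passes an iwl_mac_cfg. The trick: __builtin_types_compatible_p() is a compile-time 0/1 and __builtin_choose_expr() selects either the constant 0 or a reference to the deliberately undefined _invalid_type, which breaks the build on a type mismatch. A reduced sketch of the pattern with a hypothetical struct foo_cfg:

#include <stdio.h>

struct foo_cfg { int x; };

extern int _invalid_type; /* never defined anywhere */

/* 0 when cfg is a struct foo_cfg; otherwise the initializer below
 * references an undefined, non-constant symbol and the build fails.
 */
#define CHECK_CFG(cfg)						\
	(__builtin_choose_expr(					\
		__builtin_types_compatible_p(typeof(cfg),	\
					     struct foo_cfg),	\
		0, _invalid_type))

static const struct foo_cfg my_cfg = { .x = 1 };

/* same shape as a pci_device_id driver_data assignment */
static const unsigned long data = CHECK_CFG(my_cfg) +
				  (unsigned long)&my_cfg;

int main(void)
{
	printf("%#lx\n", data);
	return 0;
}
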
@@ -35,515 +33,518 @@ extern int _invalid_type;
/* Hardware specific file defines the PCI IDs table for that hardware module */
VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
#if IS_ENABLED(CONFIG_IWLDVM)
- {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5000_mac_cfg)}, /* Half Mini Card */
/* 5300 Series WiFi */
- {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5000_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5000_mac_cfg)}, /* Half Mini Card */
/* 5350 Series WiFi/WiMax */
- {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5000_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5000_mac_cfg)}, /* Mini Card */
/* 5150 Series WiFi/WiMax */
- {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
-
- {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_mac_cfg)}, /* Half Mini Card */
+
+ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_mac_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_mac_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_mac_cfg)}, /* Half Mini Card */
/* 6x00 Series */
- {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
- {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_mac_cfg)},
/* 6x05 Series */
- {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
- {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_mac_cfg)},/* low 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_mac_cfg)},/* high 5GHz active */
/* 6x30 Series */
- {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_mac_cfg)},
/* 6x50 WiFi/WiMax Series */
- {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_mac_cfg)},
/* 6150 WiFi/WiMax Series */
- {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_mac_cfg)},
/* 1000 Series WiFi */
- {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_mac_cfg)},
/* 100 Series WiFi */
- {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
- {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl1000_mac_cfg)},
/* 130 Series WiFi */
- {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5005, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5007, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5015, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5017, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5025, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5027, iwl1000_mac_cfg)},
/* 2x00 Series */
- {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_mac_cfg)},
/* 2x30 Series */
- {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_mac_cfg)},
/* 6x35 Series */
- {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6030_mac_cfg)},
/* 105 Series */
- {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_mac_cfg)},
/* 135 Series */
- {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_mac_cfg)},
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
/* 7260 Series */
- {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
- {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7000_mac_cfg)},
/* 3160 Series */
- {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl7000_mac_cfg)},
/* 3165 Series */
- {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4012, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4212, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4410, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4510, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4310, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3166, 0x4210, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x8110, iwl7000_mac_cfg)},
/* 3168 Series */
- {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl7000_mac_cfg)},
/* 7265 Series */
- {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7000_mac_cfg)},
/* 8000 Series */
- {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
- {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xC030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0xD030, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1431, iwl8000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x1432, iwl8000_mac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
- {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)},
- {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)},
- {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)},
- {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_mac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_mac_cfg)},
/* Qu devices */
- {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
- {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+ {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_mac_cfg)},
+ {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_mac_cfg)},
- {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_mac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_mac_cfg)},
- {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)},
+ {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_mac_cfg)},
-/* So devices */
- {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
- {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
- {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
- {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
- {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
- {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
+/* Ty/So devices */
+ {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_ty_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_mac_cfg)},
+ {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_mac_cfg)},
+ {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_mac_cfg)},
/* Ma devices */
- {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
- {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
+ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_mac_cfg)},
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
/* Bz devices */
- {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1775, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1776, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_mac_cfg)},
+ {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_mac_cfg)},
/* Sc devices */
- {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
- {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
- {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
- {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
-
-/* Dr devices */
- {IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
+ {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_mac_cfg)},
+ {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_mac_cfg)},
+ {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_mac_cfg)},
+ {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_mac_cfg)},
#endif /* CONFIG_IWLMLD */
{0}
@@ -551,689 +552,518 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
-#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
- _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
- { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
- .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \
- .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
- .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY }
-
-#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
- _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, _cfg, _name)
+#define _IWL_DEV_INFO(_cfg, _name, ...) { \
+ .cfg = &_cfg, \
+ .name = _name, \
+ .device = IWL_CFG_ANY, \
+ .subdevice = IWL_CFG_ANY, \
+ .subdevice_m_h = 15, \
+ __VA_ARGS__ \
+}
+#define IWL_DEV_INFO(_cfg, _name, ...) \
+ _IWL_DEV_INFO(_cfg, _name, __VA_ARGS__)
+
+#define DEVICE(n) .device = (n)
+#define SUBDEV(n) .subdevice = (n)
+#define _LOWEST_BIT(n) (__builtin_ffs(n) - 1)
+#define _BIT_ABOVE_MASK(n) ((n) + (1 << _LOWEST_BIT(n)))
+#define _HIGHEST_BIT(n) (__builtin_ffs(_BIT_ABOVE_MASK(n)) - 2)
+#define _IS_POW2(n) (((n) & ((n) - 1)) == 0)
+#define _IS_CONTIG(n) _IS_POW2(_BIT_ABOVE_MASK(n))
+#define _CHECK_MASK(m) BUILD_BUG_ON_ZERO(!_IS_CONTIG(m))
+#define SUBDEV_MASKED(v, m) .subdevice = (v) + _CHECK_MASK(m), \
+ .subdevice_m_l = _LOWEST_BIT(m), \
+ .subdevice_m_h = _HIGHEST_BIT(m)
+#define RF_TYPE(n) .match_rf_type = 1, \
+ .rf_type = IWL_CFG_RF_TYPE_##n
+#define RF_STEP(n) .match_rf_step = 1, \
+ .rf_step = SILICON_##n##_STEP
+#define RF_ID(n) .match_rf_id = 1, \
+ .rf_id = IWL_CFG_RF_ID_##n
+#define NO_CDB .match_cdb = 1, .cdb = 0
+#define CDB .match_cdb = 1, .cdb = 1
+#define BW_NOT_LIMITED .match_bw_limit = 1, .bw_limit = 0
+#define BW_LIMITED .match_bw_limit = 1, .bw_limit = 1
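The SUBDEV_MASKED() helpers above derive the compared subdevice bit range from the mask itself: _LOWEST_BIT() and _HIGHEST_BIT() locate the span of set bits, and _CHECK_MASK() uses _IS_CONTIG() to reject any mask with a hole at build time via BUILD_BUG_ON_ZERO(). A minimal standalone sketch of that bit math (macro bodies copied verbatim from the hunk above; the compile-time check is stood in for by a runtime assert() so the sketch builds outside the kernel tree, and __builtin_ffs() assumes GCC/Clang):

	#include <assert.h>
	#include <stdio.h>

	/* verbatim from the hunk above */
	#define _LOWEST_BIT(n)		(__builtin_ffs(n) - 1)
	#define _BIT_ABOVE_MASK(n)	((n) + (1 << _LOWEST_BIT(n)))
	#define _HIGHEST_BIT(n)		(__builtin_ffs(_BIT_ABOVE_MASK(n)) - 2)
	#define _IS_POW2(n)		(((n) & ((n) - 1)) == 0)
	#define _IS_CONTIG(n)		_IS_POW2(_BIT_ABOVE_MASK(n))

	int main(void)
	{
		/* 0xF = bits 0..3: contiguous, usable with SUBDEV_MASKED() */
		assert(_IS_CONTIG(0xF));
		assert(_LOWEST_BIT(0xF) == 0 && _HIGHEST_BIT(0xF) == 3);

		/* 0xF000 = bits 12..15, as used by the 6x05 SFF entry below */
		assert(_IS_CONTIG(0xF000));
		assert(_LOWEST_BIT(0xF000) == 12 && _HIGHEST_BIT(0xF000) == 15);

		/* 0xA = bits 1 and 3: the hole at bit 2 is exactly what
		 * _CHECK_MASK() would turn into a build failure */
		assert(!_IS_CONTIG(0xA));

		printf("mask helper checks passed\n");
		return 0;
	}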
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
+#if IS_ENABLED(CONFIG_IWLDVM)
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+ DEVICE(0x4232), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+ DEVICE(0x4232), SUBDEV_MASKED(0x4, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_bgn_name,
+ DEVICE(0x4232), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl5100_abg_cfg, iwl5100_abg_name,
+ DEVICE(0x4232), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+ DEVICE(0x4237), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
+ DEVICE(0x4237), SUBDEV_MASKED(0x4, 0xF)),
+ IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_bgn_name,
+ DEVICE(0x4237), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl5100_abg_cfg, iwl5100_abg_name,
+ DEVICE(0x4237), SUBDEV_MASKED(0x6, 0xF)),
+
+/* 5300 Series WiFi */
+ IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+ DEVICE(0x4235), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+ DEVICE(0x4235), SUBDEV_MASKED(0x4, 0xF)),
+ IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+ DEVICE(0x4236), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5300_agn_cfg, iwl5300_agn_name,
+ DEVICE(0x4236), SUBDEV_MASKED(0x4, 0xF)),
+
+/* 5350 Series WiFi/WiMax */
+ IWL_DEV_INFO(iwl5350_agn_cfg, iwl5350_agn_name,
+ DEVICE(0x423A)),
+ IWL_DEV_INFO(iwl5350_agn_cfg, iwl5350_agn_name,
+ DEVICE(0x423B)),
+
+/* 5150 Series WiFi/WiMax */
+ IWL_DEV_INFO(iwl5150_agn_cfg, iwl5150_agn_name,
+ DEVICE(0x423C), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5150_abg_cfg, iwl5150_abg_name,
+ DEVICE(0x423C), SUBDEV_MASKED(0x6, 0xF)),
+
+ IWL_DEV_INFO(iwl5150_agn_cfg, iwl5150_agn_name,
+ DEVICE(0x423D), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl5150_abg_cfg, iwl5150_abg_name,
+ DEVICE(0x423D), SUBDEV_MASKED(0x6, 0xF)),
+
+/* 6x00 Series */
+ IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+ DEVICE(0x422B), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+ DEVICE(0x422B), SUBDEV_MASKED(0x8, 0xF)),
+ IWL_DEV_INFO(iwl6000i_2agn_cfg, iwl6000i_2agn_name,
+ DEVICE(0x422C), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2abg_name,
+ DEVICE(0x422C), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2bg_name,
+ DEVICE(0x422C), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+ DEVICE(0x4238), SUBDEV(0x1111)),
+ IWL_DEV_INFO(iwl6000_3agn_cfg, iwl6000_3agn_name,
+ DEVICE(0x4238), SUBDEV(0x1118)),
+ IWL_DEV_INFO(iwl6000i_2agn_cfg, iwl6000i_2agn_name,
+ DEVICE(0x4239), SUBDEV(0x1311)),
+ IWL_DEV_INFO(iwl6000i_non_n_cfg, iwl6000i_2abg_name,
+ DEVICE(0x4239), SUBDEV(0x1316)),
+
+/* 6x05 Series */
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2abg_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2bg_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0x8, 0xF)),
+
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+ DEVICE(0x0085), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_name,
+ DEVICE(0x0085), SUBDEV_MASKED(0x8, 0xF)),
+ IWL_DEV_INFO(iwl6005_non_n_cfg, iwl6005_2abg_name,
+ DEVICE(0x0085), SUBDEV_MASKED(0x6, 0xF)),
+
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_sff_name,
+ DEVICE(0x0082), SUBDEV_MASKED(0xC000, 0xF000)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_d_name,
+ DEVICE(0x0082), SUBDEV(0x4820)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow1_name,
+ DEVICE(0x0082), SUBDEV(0x1304)), /* low 5GHz active */
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow2_name,
+ DEVICE(0x0082), SUBDEV(0x1305)), /* high 5GHz active */
+
+/* 6x30 Series */
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl1030_bgn_name,
+ DEVICE(0x008A), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl1030_bg_name,
+ DEVICE(0x008A), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl1030_bgn_name,
+ DEVICE(0x008B), SUBDEV(0x5315)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl1030_bg_name,
+ DEVICE(0x008B), SUBDEV(0x5317)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2agn_name,
+ DEVICE(0x0090), SUBDEV(0x5211)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2bgn_name,
+ DEVICE(0x0090), SUBDEV(0x5215)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2abg_name,
+ DEVICE(0x0090), SUBDEV(0x5216)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2agn_name,
+ DEVICE(0x0091), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6030_n_cfg, iwl6030_2bgn_name,
+ DEVICE(0x0091), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2abg_name,
+ DEVICE(0x0091), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl6030_non_n_cfg, iwl6030_2bg_name,
+ DEVICE(0x0091), SUBDEV(0x5207)),
+
+/* 6x50 WiFi/WiMax Series */
+ IWL_DEV_INFO(iwl6050_2agn_cfg, iwl6050_2agn_name,
+ DEVICE(0x0087), SUBDEV_MASKED(0x1, 0xF)),
+ IWL_DEV_INFO(iwl6050_2abg_cfg, iwl6050_2abg_name,
+ DEVICE(0x0087), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl6050_2agn_cfg, iwl6050_2agn_name,
+ DEVICE(0x0089), SUBDEV(0x1311)),
+ IWL_DEV_INFO(iwl6050_2abg_cfg, iwl6050_2abg_name,
+ DEVICE(0x0089), SUBDEV(0x1316)),
+
+/* 6150 WiFi/WiMax Series */
+ IWL_DEV_INFO(iwl6150_bgn_cfg, iwl6150_bgn_name,
+ DEVICE(0x0885), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl6150_bg_cfg, iwl6150_bg_name,
+ DEVICE(0x0885), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl6150_bgn_cfg, iwl6150_bgn_name,
+ DEVICE(0x0886), SUBDEV(0x1315)),
+ IWL_DEV_INFO(iwl6150_bg_cfg, iwl6150_bg_name,
+ DEVICE(0x0886), SUBDEV(0x1317)),
+
+/* 1000 Series WiFi */
+ IWL_DEV_INFO(iwl1000_bgn_cfg, iwl1000_bgn_name,
+ DEVICE(0x0083), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
+ DEVICE(0x0083), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
+ DEVICE(0x0084), SUBDEV(0x1216)),
+ IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
+ DEVICE(0x0084), SUBDEV(0x1316)),
+
+/* 100 Series WiFi */
+ IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name,
+ DEVICE(0x08AE), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl100_bg_cfg, iwl100_bg_name,
+ DEVICE(0x08AE), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name,
+ DEVICE(0x08AF), SUBDEV(0x1015)),
+ IWL_DEV_INFO(iwl100_bg_cfg, iwl100_bg_name,
+ DEVICE(0x08AF), SUBDEV(0x1017)),
+
+/* 130 Series WiFi */
+ IWL_DEV_INFO(iwl130_bgn_cfg, iwl130_bgn_name,
+ DEVICE(0x0896), SUBDEV_MASKED(0x5, 0xF)),
+ IWL_DEV_INFO(iwl130_bg_cfg, iwl130_bg_name,
+ DEVICE(0x0896), SUBDEV_MASKED(0x7, 0xF)),
+ IWL_DEV_INFO(iwl130_bgn_cfg, iwl130_bgn_name,
+ DEVICE(0x0897), SUBDEV(0x5015)),
+ IWL_DEV_INFO(iwl130_bg_cfg, iwl130_bg_name,
+ DEVICE(0x0897), SUBDEV(0x5017)),
+
+/* 2x00 Series */
+ IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+ DEVICE(0x0890), SUBDEV(0x4022)),
+ IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+ DEVICE(0x0891), SUBDEV(0x4222)),
+ IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_name,
+ DEVICE(0x0890), SUBDEV(0x4422)),
+ IWL_DEV_INFO(iwl2000_2bgn_cfg, iwl2000_2bgn_d_name,
+ DEVICE(0x0890), SUBDEV(0x4822)),
+
+/* 2x30 Series */
+ IWL_DEV_INFO(iwl2030_2bgn_cfg, iwl2030_2bgn_name,
+ DEVICE(0x0887)),
+ IWL_DEV_INFO(iwl2030_2bgn_cfg, iwl2030_2bgn_name,
+ DEVICE(0x0888), SUBDEV(0x4262)),
+
+/* 6x35 Series */
+ IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_name,
+ DEVICE(0x088E), SUBDEV_MASKED(0x0, 0xF)),
+ IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_sff_name,
+ DEVICE(0x088E), SUBDEV_MASKED(0xA, 0xF)),
+ IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_name,
+ DEVICE(0x088F), SUBDEV_MASKED(0x0, 0xF)),
+ IWL_DEV_INFO(iwl6035_2agn_cfg, iwl6035_2agn_sff_name,
+ DEVICE(0x088F), SUBDEV_MASKED(0xA, 0xF)),
+
+/* 105 Series */
+ IWL_DEV_INFO(iwl105_bgn_cfg, iwl105_bgn_name,
+ DEVICE(0x0894)),
+ IWL_DEV_INFO(iwl105_bgn_cfg, iwl105_bgn_name,
+ DEVICE(0x0895), SUBDEV(0x0222)),
+
+/* 135 Series */
+ IWL_DEV_INFO(iwl135_bgn_cfg, iwl135_bgn_name,
+ DEVICE(0x0892)),
+ IWL_DEV_INFO(iwl135_bgn_cfg, iwl135_bgn_name,
+ DEVICE(0x0893), SUBDEV(0x0262)),
+#endif /* CONFIG_IWLDVM */
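To make the new table format concrete: following the macros introduced in this hunk, the first 5100-series entry above expands by hand to roughly the designated-initializer struct below. Later initializers override the IWL_CFG_ANY defaults supplied by _IWL_DEV_INFO() (standard C designated-initializer semantics), so an entry constrains only the fields it names; how IWL_CFG_ANY and the subdevice_m_l/subdevice_m_h window are consumed by the probe-time matcher is outside this hunk:

	/*
	 * IWL_DEV_INFO(iwl5100_n_cfg, iwl5100_agn_name,
	 *		DEVICE(0x4232), SUBDEV_MASKED(0x1, 0xF))
	 * expands to roughly:
	 */
	{
		.cfg = &iwl5100_n_cfg,
		.name = iwl5100_agn_name,
		.device = IWL_CFG_ANY,		/* default: any device ID */
		.subdevice = IWL_CFG_ANY,	/* default: any subdevice */
		.subdevice_m_h = 15,		/* default: whole 16-bit subdevice */
		.device = (0x4232),		/* DEVICE() override */
		.subdevice = (0x1) + 0,		/* _CHECK_MASK(0xF) is 0: mask is contiguous */
		.subdevice_m_l = 0,		/* _LOWEST_BIT(0xF) */
		.subdevice_m_h = 3,		/* _HIGHEST_BIT(0xF): low nibble only */
	},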
+
#if IS_ENABLED(CONFIG_IWLMVM)
-/* 9000 */
- IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
- IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
- IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
- IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
- IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
- IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
- IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
- IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
- IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
- IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
- IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
-
- IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
- IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name),
- IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma, iwl_ax411_killer_1690i_name),
-
-/* AX200 */
- IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
- IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name),
- IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
-
- /* Qu with Hr */
- IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
- IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
- IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-
- IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
-
- IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
- IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
- IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
-
- /* So with HR */
- IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x2020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL),
- IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name),
- IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name),
- IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL),
- IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL),
-
- /* So with JF */
- IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
- IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
- IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
- IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
-
- /* SO with GF2 */
- IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
- IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name),
-
- /* MA with GF2 */
- IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma, iwl_ax211_killer_1675s_name),
- IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma, iwl_ax211_killer_1675i_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9462_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_2ac_cfg_soc, iwl9560_name),
-
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9270_160_name),
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9270_name),
-
- _IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9162_160_name),
- _IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9162_name),
-
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9260_160_name),
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9260_name),
-
-/* Qu with Jf */
- /* Qu B step */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
- /* Qu C step */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
- /* QuZ */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
-
-/* Qu with Hr */
- /* Qu B step */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_b0_hr1_b0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_b0_hr_b0, iwl_ax203_name),
-
- /* Qu C step */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_c0_hr1_b0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_c0_hr_b0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_qu_c0_hr_b0, iwl_ax201_name),
-
- /* QuZ */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_quz_a0_hr1_b0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_quz_a0_hr_b0, iwl_ax201_name),
-
-/* Ma */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax201_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_ma, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax231_name),
-
-/* So with Hr */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
-
-/* So-F with Hr */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
-
-/* So-F with Gf */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
- iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-
-/* SoF with JF2 */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
-
-/* SoF with JF */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
-
-/* So with GF */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
- iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-
-/* So with JF2 */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
-
-/* So with JF */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+/* 7260 Series */
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B1)), /* unlisted subdevices fall through to here */
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4060)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x406A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4160)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0x4062)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0x4162)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4460)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x446A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0x4462)),
+ IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B1), SUBDEV(0x4A70)),
+ IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B1), SUBDEV(0x4A6E)),
+ IWL_DEV_INFO(iwl7260_high_temp_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B1), SUBDEV(0x4A6C)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4560)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4020)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x402A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0x4420)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC060)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC06A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC160)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0xC062)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0xC162)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC760)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC460)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B1), SUBDEV(0xC462)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC560)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC360)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC020)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC02A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B1), SUBDEV(0xC420)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0x4270)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0x4272)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0x4260)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0x426A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B2), SUBDEV(0x4262)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0x4370)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0x4360)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0x4220)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0xC270)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0xC272)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0xC260)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B2), SUBDEV(0xC26A)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_n_name,
+ DEVICE(0x08B2), SUBDEV(0xC262)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2ac_name,
+ DEVICE(0x08B2), SUBDEV(0xC370)),
+ IWL_DEV_INFO(iwl7260_cfg, iwl7260_2n_name,
+ DEVICE(0x08B2), SUBDEV(0xC220)),
+/* 3160 Series */
+ IWL_DEV_INFO(iwl3160_cfg, iwl3160_2ac_name,
+ DEVICE(0x08B3)),
+
+ IWL_DEV_INFO(iwl3160_cfg, iwl3160_n_name,
+ DEVICE(0x08B3), SUBDEV_MASKED(0x62, 0xFF)),
+ IWL_DEV_INFO(iwl3160_cfg, iwl3160_2n_name,
+ DEVICE(0x08B3), SUBDEV_MASKED(0x60, 0xFF)),
+
+ IWL_DEV_INFO(iwl3160_cfg, iwl3160_2ac_name,
+ DEVICE(0x08B4)),
+
+/* 3165 Series */
+ IWL_DEV_INFO(iwl3165_2ac_cfg, iwl3165_2ac_name,
+ DEVICE(0x3165)),
+ IWL_DEV_INFO(iwl3165_2ac_cfg, iwl3165_2ac_name,
+ DEVICE(0x3166)),
+
+/* 3168 Series */
+ IWL_DEV_INFO(iwl3168_2ac_cfg, iwl3168_2ac_name,
+ DEVICE(0x24FB)),
+
+/* 7265 Series */
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2ac_name,
+ DEVICE(0x095A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5000)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x500A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+ DEVICE(0x095A), SUBDEV(0x5002)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+ DEVICE(0x095A), SUBDEV(0x5102)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5020)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x502A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5090)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5190)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5100)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5400)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5420)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5490)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5C10)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x5590)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x9000)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x900A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095A), SUBDEV(0x9400)),
+
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2ac_name,
+ DEVICE(0x095B)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095B), SUBDEV(0x520A)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+ DEVICE(0x095B), SUBDEV(0x5302)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095B), SUBDEV(0x5200)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_n_name,
+ DEVICE(0x095B), SUBDEV(0x5202)),
+ IWL_DEV_INFO(iwl7265_cfg, iwl7265_2n_name,
+ DEVICE(0x095B), SUBDEV(0x9200)),
+
+/* 8000 Series */
+ IWL_DEV_INFO(iwl8260_cfg, iwl8260_2ac_name,
+ DEVICE(0x24F3)),
+ IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name,
+ DEVICE(0x24F3), SUBDEV(0x0004)),
+ IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name,
+ DEVICE(0x24F3), SUBDEV(0x0044)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8265_2ac_name,
+ DEVICE(0x24FD)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
+ DEVICE(0x24FD), SUBDEV(0x3E02)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
+ DEVICE(0x24FD), SUBDEV(0x3E01)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
+ DEVICE(0x24FD), SUBDEV(0x1012)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
+ DEVICE(0x24FD), SUBDEV(0x0012)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl_killer_1435i_name,
+ DEVICE(0x24FD), SUBDEV(0x1431)),
+ IWL_DEV_INFO(iwl8265_cfg, iwl_killer_1434_kix_name,
+ DEVICE(0x24FD), SUBDEV(0x1432)),
+
+/* JF1 RF */
+ IWL_DEV_INFO(iwl_rf_jf, iwl9461_160_name,
+ RF_TYPE(JF1)),
+ IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9461_name,
+ RF_TYPE(JF1), BW_LIMITED),
+ IWL_DEV_INFO(iwl_rf_jf, iwl9462_160_name,
+ RF_TYPE(JF1), RF_ID(JF1_DIV)),
+ IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9462_name,
+ RF_TYPE(JF1), RF_ID(JF1_DIV), BW_LIMITED),
+/* JF2 RF */
+ IWL_DEV_INFO(iwl_rf_jf, iwl9260_160_name,
+ RF_TYPE(JF2)),
+ IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9260_name,
+ RF_TYPE(JF2), BW_LIMITED),
+ IWL_DEV_INFO(iwl_rf_jf, iwl9560_160_name,
+ RF_TYPE(JF2), RF_ID(JF)),
+ IWL_DEV_INFO(iwl_rf_jf_80mhz, iwl9560_name,
+ RF_TYPE(JF2), RF_ID(JF), BW_LIMITED),
+
+/* HR RF */
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_name, RF_TYPE(HR2)),
+ IWL_DEV_INFO(iwl_rf_hr_80mhz, iwl_ax101_name, RF_TYPE(HR1)),
+ IWL_DEV_INFO(iwl_rf_hr_80mhz, iwl_ax203_name, RF_TYPE(HR2), BW_LIMITED),
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_name, DEVICE(0x2723)),
+
+/* GF RF */
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_name, RF_TYPE(GF)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_name, RF_TYPE(GF), CDB),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_name, DEVICE(0x2725)),
+
+/* Killer CRFs */
+ IWL_DEV_INFO(iwl_rf_jf, iwl9260_killer_1550_name, SUBDEV(0x1550)),
+ IWL_DEV_INFO(iwl_rf_jf, iwl9560_killer_1550s_name, SUBDEV(0x1551)),
+ IWL_DEV_INFO(iwl_rf_jf, iwl9560_killer_1550i_name, SUBDEV(0x1552)),
+
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_killer_1650s_name, SUBDEV(0x1651)),
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax201_killer_1650i_name, SUBDEV(0x1652)),
+
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_killer_1675s_name, SUBDEV(0x1671)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax211_killer_1675i_name, SUBDEV(0x1672)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_killer_1675w_name, SUBDEV(0x1673)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax210_killer_1675x_name, SUBDEV(0x1674)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_killer_1690s_name, SUBDEV(0x1691)),
+ IWL_DEV_INFO(iwl_rf_gf, iwl_ax411_killer_1690i_name, SUBDEV(0x1692)),
+
+/* Killer discrete */
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_killer_1650w_name,
+ DEVICE(0x2723), SUBDEV(0x1653)),
+ IWL_DEV_INFO(iwl_rf_hr, iwl_ax200_killer_1650x_name,
+ DEVICE(0x2723), SUBDEV(0x1654)),
#endif /* CONFIG_IWLMVM */
#if IS_ENABLED(CONFIG_IWLMLD)
-/* Bz */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_ax201_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_ax211_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_fm_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_wh_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_ax201_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_ax211_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_fm_name),
-
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_wh_name),
-
-/* Ga (Gl) */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_gl_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_mtp_name),
-
-/* Sc */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_fm_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_wh_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_fm_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_wh_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_ax211_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_fm_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_wh_name),
-
-/* Dr */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_dr, iwl_dr_name),
-
-/* Br */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_br, iwl_br_name),
+/* FM RF */
+ IWL_DEV_INFO(iwl_rf_fm, iwl_be201_name, RF_TYPE(FM)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_be401_name, RF_TYPE(FM), CDB),
+ /* the discrete NICs got RF step B0; it only matters for the name anyway */
+ IWL_DEV_INFO(iwl_rf_fm, iwl_be200_name, RF_TYPE(FM),
+ DEVICE(0x272B), RF_STEP(B)),
+ IWL_DEV_INFO(iwl_rf_fm_160mhz, iwl_be202_name,
+ RF_TYPE(FM), BW_LIMITED),
+
+/* Killer CRFs */
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750s_name, SUBDEV(0x1771)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750i_name, SUBDEV(0x1772)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1790s_name, SUBDEV(0x1791)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1790i_name, SUBDEV(0x1792)),
+
+/* Killer discrete */
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750w_name,
+ DEVICE(0x272B), SUBDEV(0x1773)),
+ IWL_DEV_INFO(iwl_rf_fm, iwl_killer_be1750x_name,
+ DEVICE(0x272B), SUBDEV(0x1774)),
+
+/* WH RF */
+ IWL_DEV_INFO(iwl_rf_wh, iwl_be211_name, RF_TYPE(WH)),
+ IWL_DEV_INFO(iwl_rf_wh_160mhz, iwl_be213_name, RF_TYPE(WH), BW_LIMITED),
+
+/* PE RF */
+ IWL_DEV_INFO(iwl_rf_pe, iwl_bn201_name, RF_TYPE(PE)),
+ IWL_DEV_INFO(iwl_rf_pe, iwl_be223_name, RF_TYPE(PE), SUBDEV(0x0524)),
+ IWL_DEV_INFO(iwl_rf_pe, iwl_be221_name, RF_TYPE(PE), SUBDEV(0x0324)),
+
+/* Killer */
+ IWL_DEV_INFO(iwl_rf_wh, iwl_killer_be1775s_name, SUBDEV(0x1776)),
+ IWL_DEV_INFO(iwl_rf_wh, iwl_killer_be1775i_name, SUBDEV(0x1775)),
+
+ IWL_DEV_INFO(iwl_rf_pe, iwl_killer_bn1850w2_name, SUBDEV(0x1851)),
+ IWL_DEV_INFO(iwl_rf_pe, iwl_killer_bn1850i_name, SUBDEV(0x1852)),
#endif /* CONFIG_IWLMLD */
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
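For illustration, a minimal user-space sketch (not part of the patch) of
what the table's SUBDEV()/SUBDEV_MASKED() matching boils down to: an entry
storing value 0x62 with mask 0xFF, as on the 0x08B3 lines above, matches
any subsystem ID whose low byte is 0x62. The helper name is made up for
this example:

	#include <stdint.h>
	#include <stdio.h>

	static int subdev_matches(uint16_t entry_val, uint16_t mask,
				  uint16_t subsys)
	{
		/* compare only the bits selected by the entry's mask */
		return (subsys & mask) == entry_val;
	}

	int main(void)
	{
		printf("%d\n", subdev_matches(0x62, 0xFF, 0x1062)); /* 1 */
		printf("%d\n", subdev_matches(0x62, 0xFF, 0x1060)); /* 0 */
		return 0;
	}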
@@ -1246,13 +1076,15 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size);
/*
* Read rf id and cdb info from prph register and store it
*/
-static void get_crf_id(struct iwl_trans *iwl_trans)
+static void get_crf_id(struct iwl_trans *iwl_trans,
+ struct iwl_trans_info *info)
{
u32 sd_reg_ver_addr;
+ u32 hw_wfpm_id;
u32 val = 0;
u8 step;
- if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
sd_reg_ver_addr = SD_REG_VER_GEN2;
else
sd_reg_ver_addr = SD_REG_VER;
@@ -1263,83 +1095,83 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);
/* Read crf info */
- iwl_trans->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
+ info->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
/* Read cnv info */
- iwl_trans->hw_cnv_id =
- iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ info->hw_cnv_id = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
/* For BZ-W, take B step also when A step is indicated */
- if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
+ if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
step = SILICON_B_STEP;
/* In BZ, the MAC step must be read from the CNVI aux register */
- if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
- step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+ if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
+ step = CNVI_AUX_MISC_CHIP_MAC_STEP(info->hw_cnv_id);
/* For BZ-U, take B step also when A step is indicated */
- if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
+ if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(info->hw_cnv_id) ==
CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
step == SILICON_A_STEP)
step = SILICON_B_STEP;
}
- if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
- CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
- iwl_trans->hw_rev_step = step;
- iwl_trans->hw_rev |= step;
+ if (CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
+ CSR_HW_REV_TYPE(info->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
+ info->hw_rev_step = step;
+ info->hw_rev |= step;
}
/* Read cdb info (also contains the jacket info if needed in the future) */
- iwl_trans->hw_wfpm_id =
- iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
+ hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
- iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id,
- iwl_trans->hw_wfpm_id);
+ info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id);
}
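A self-contained sketch of the step fixups above, assuming only what the
code shows (the enum values and helper are local stand-ins for
SILICON_*_STEP, not driver API): BZ-W always takes step B, and BZ-U
promotes a reported A step to B:

	#include <stdint.h>

	enum { STEP_A = 0, STEP_B = 1 };	/* stand-ins for SILICON_*_STEP */

	static uint8_t effective_mac_step(int is_bz_w, int is_bz_u,
					  uint8_t reported)
	{
		if (is_bz_w)
			return STEP_B;		/* B even when A is indicated */
		if (is_bz_u && reported == STEP_A)
			return STEP_B;		/* promote A to B on BZ-U */
		return reported;
	}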
/*
* If there is no OTP on the NIC, map the rf id and cdb info
* from the prph registers.
*/
-static int map_crf_id(struct iwl_trans *iwl_trans)
+static int map_crf_id(struct iwl_trans *iwl_trans,
+ struct iwl_trans_info *info)
{
int ret = 0;
- u32 val = iwl_trans->hw_crf_id;
+ u32 val = info->hw_crf_id;
u32 step_id = REG_CRF_ID_STEP(val);
u32 slave_id = REG_CRF_ID_SLAVE(val);
- u32 jacket_id_cnv = REG_CRF_ID_SLAVE(iwl_trans->hw_cnv_id);
- u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(iwl_trans->hw_wfpm_id);
- u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(iwl_trans->hw_wfpm_id);
+ u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id);
+ u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans,
+ WFPM_OTP_CFG1_ADDR);
+ u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id);
+ u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id);
/* Map between crf id to rf id */
switch (REG_CRF_ID_TYPE(val)) {
case REG_CRF_ID_TYPE_JF_1:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
break;
case REG_CRF_ID_TYPE_JF_2:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
break;
case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
break;
case REG_CRF_ID_TYPE_HR_NONE_CDB:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
break;
case REG_CRF_ID_TYPE_HR_CDB:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
break;
case REG_CRF_ID_TYPE_GF:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
break;
case REG_CRF_ID_TYPE_FM:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
break;
case REG_CRF_ID_TYPE_WHP:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
break;
case REG_CRF_ID_TYPE_PE:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
+ info->hw_rf_id = (IWL_CFG_RF_TYPE_PE << 12);
break;
default:
ret = -EIO;
@@ -1351,28 +1183,28 @@ static int map_crf_id(struct iwl_trans *iwl_trans)
}
/* Set Step-id */
- iwl_trans->hw_rf_id |= (step_id << 8);
+ info->hw_rf_id |= (step_id << 8);
/* Set CDB capabilities */
if (cdb_id_wfpm || slave_id) {
- iwl_trans->hw_rf_id += BIT(28);
+ info->hw_rf_id += BIT(28);
IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
}
/* Set Jacket capabilities */
if (jacket_id_wfpm || jacket_id_cnv) {
- iwl_trans->hw_rf_id += BIT(29);
+ info->hw_rf_id += BIT(29);
IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
}
IWL_INFO(iwl_trans,
"Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
- REG_CRF_ID_TYPE(val), step_id, slave_id, iwl_trans->hw_rf_id);
+ REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id);
IWL_INFO(iwl_trans,
"Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n",
- cdb_id_wfpm, jacket_id_wfpm, iwl_trans->hw_wfpm_id);
+ cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id);
IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n",
- jacket_id_cnv, iwl_trans->hw_cnv_id);
+ jacket_id_cnv, info->hw_cnv_id);
out:
return ret;
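The bit layout map_crf_id() builds can be restated as a small sketch (bit
positions are taken from the code above; the helper name is hypothetical):

	#include <stdint.h>

	/* RF type in bits 12+, step in bits 8..11, CDB bit 28, jacket bit 29 */
	static uint32_t compose_hw_rf_id(uint32_t rf_type, uint32_t step_id,
					 int cdb, int jacket)
	{
		uint32_t rf_id = (rf_type << 12) | (step_id << 8);

		if (cdb)
			rf_id |= 1u << 28;
		if (jacket)
			rf_id |= 1u << 29;
		return rf_id;
	}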
@@ -1382,9 +1214,8 @@ out:
#define PCI_CFG_RETRY_TIMEOUT 0x041
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
-iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
- u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 rf_type, u8 cdb,
+ u8 rf_id, u8 bw_limit, u8 rf_step)
{
int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
@@ -1394,49 +1225,32 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
for (i = num_devices - 1; i >= 0; i--) {
const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+ u16 subdevice_mask;
if (dev_info->device != (u16)IWL_CFG_ANY &&
dev_info->device != device)
continue;
- if (dev_info->subdevice != (u16)IWL_CFG_ANY &&
- dev_info->subdevice != subsystem_device)
- continue;
-
- if (dev_info->mac_type != (u16)IWL_CFG_ANY &&
- dev_info->mac_type != mac_type)
- continue;
-
- if (dev_info->mac_step != (u8)IWL_CFG_ANY &&
- dev_info->mac_step != mac_step)
- continue;
-
- if (dev_info->rf_type != (u16)IWL_CFG_ANY &&
- dev_info->rf_type != rf_type)
- continue;
+ subdevice_mask = GENMASK(dev_info->subdevice_m_h,
+ dev_info->subdevice_m_l);
- if (dev_info->cdb != (u8)IWL_CFG_ANY &&
- dev_info->cdb != cdb)
+ if (dev_info->subdevice != (u16)IWL_CFG_ANY &&
+ dev_info->subdevice != (subsystem_device & subdevice_mask))
continue;
- if (dev_info->jacket != (u8)IWL_CFG_ANY &&
- dev_info->jacket != jacket)
+ if (dev_info->match_rf_type && dev_info->rf_type != rf_type)
continue;
- if (dev_info->rf_id != (u8)IWL_CFG_ANY &&
- dev_info->rf_id != rf_id)
+ if (dev_info->match_cdb && dev_info->cdb != cdb)
continue;
- if (dev_info->no_160 != (u8)IWL_CFG_ANY &&
- dev_info->no_160 != no_160)
+ if (dev_info->match_rf_id && dev_info->rf_id != rf_id)
continue;
- if (dev_info->cores != (u8)IWL_CFG_ANY &&
- dev_info->cores != cores)
+ if (dev_info->match_bw_limit && dev_info->bw_limit != bw_limit)
continue;
- if (dev_info->rf_step != (u8)IWL_CFG_ANY &&
- dev_info->rf_step != rf_step)
+ if (dev_info->match_rf_step && dev_info->rf_step != rf_step)
continue;
return dev_info;
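As a sanity check on the mask arithmetic: GENMASK(h, l) sets bits l..h
inclusive, so subdevice_m_h = 7, subdevice_m_l = 0 yields 0x00FF and the
comparison looks only at the low byte, while h = 15, l = 0 gives 0xFFFF,
i.e. an exact match (presumably what plain SUBDEV() encodes). A 16-bit
user-space equivalent, with a local stand-in for the kernel's GENMASK():

	#include <stdint.h>

	#define GENMASK_U16(h, l) \
		((uint16_t)((0xFFFFu >> (15 - (h))) & (0xFFFFu << (l))))

	static int subdevice_matches(uint16_t entry, uint8_t m_h, uint8_t m_l,
				     uint16_t subsystem_device)
	{
		return entry == (subsystem_device & GENMASK_U16(m_h, m_l));
	}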
@@ -1448,30 +1262,32 @@ EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
static void iwl_pcie_recheck_me_status(struct work_struct *wk)
{
- struct iwl_trans *trans = container_of(wk, typeof(*trans),
- me_recheck_wk.work);
+ struct iwl_trans_pcie *trans_pcie = container_of(wk,
+ typeof(*trans_pcie),
+ me_recheck_wk.work);
u32 val;
- val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
- trans->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP);
+ val = iwl_read32(trans_pcie->trans, CSR_HW_IF_CONFIG_REG);
+ trans_pcie->me_present = !!(val & CSR_HW_IF_CONFIG_REG_IAMT_UP);
}
static void iwl_pcie_check_me_status(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
- trans->me_present = -1;
+ trans_pcie->me_present = -1;
- INIT_DELAYED_WORK(&trans->me_recheck_wk,
+ INIT_DELAYED_WORK(&trans_pcie->me_recheck_wk,
iwl_pcie_recheck_me_status);
/* we don't have a good way of determining this until BZ */
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
return;
val = iwl_read_prph(trans, CNVI_SCU_REG_FOR_ECO_1);
if (val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_KNOWN) {
- trans->me_present =
+ trans_pcie->me_present =
!!(val & CNVI_SCU_REG_FOR_ECO_1_WIAMT_PRESENT);
return;
}
@@ -1479,38 +1295,28 @@ static void iwl_pcie_check_me_status(struct iwl_trans *trans)
val = iwl_read32(trans, CSR_HW_IF_CONFIG_REG);
if (val & (CSR_HW_IF_CONFIG_REG_ME_OWN |
CSR_HW_IF_CONFIG_REG_IAMT_UP)) {
- trans->me_present = 1;
+ trans_pcie->me_present = 1;
return;
}
/* recheck again later, ME might still be initializing */
- schedule_delayed_work(&trans->me_recheck_wk, HZ);
+ schedule_delayed_work(&trans_pcie->me_recheck_wk, HZ);
}
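A note on the tri-state me_present field, shown with a hypothetical helper
(illustration only, not driver code): -1 still evaluates as true in a
boolean context, so the field can double as "assume present until proven
otherwise" while the delayed work refines it:

	static const char *me_state_str(int me_present)
	{
		if (me_present < 0)
			return "unknown (recheck scheduled)";
		return me_present ? "present" : "absent";
	}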
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct iwl_cfg_trans_params *trans;
- const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+ const struct iwl_mac_cfg *trans;
const struct iwl_dev_info *dev_info;
+ struct iwl_trans_info info = {
+ .hw_id = (pdev->device << 16) + pdev->subsystem_device,
+ };
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int ret;
- const struct iwl_cfg *cfg;
- trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
+ trans = (void *)ent->driver_data;
- /*
- * This is needed for backwards compatibility with the old
- * tables, so we don't need to change all the config structs
- * at the same time. The cfg is used to compare with the old
- * full cfg structs.
- */
- cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
-
- /* make sure trans is the first element in iwl_cfg */
- BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
-
- iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans);
+ iwl_trans = iwl_trans_pcie_alloc(pdev, trans, &info);
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
@@ -1519,6 +1325,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_trans_pcie_check_product_reset_status(pdev);
iwl_trans_pcie_check_product_reset_mode(pdev);
+ /* set the things we know so far, needed to grab NIC access below */
+ iwl_trans_set_info(iwl_trans, &info);
+
/*
* Let's try to grab NIC access early here. Sometimes, NICs may
* fail to initialize, and if that happens it's better if we see
@@ -1532,7 +1341,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
- get_crf_id(iwl_trans);
+ get_crf_id(iwl_trans, &info);
/* all good */
iwl_trans_release_nic_access(iwl_trans);
} else {
@@ -1541,38 +1350,32 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
+ info.hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
/*
* The RF_ID is set to zero in blank OTP so read version to
* extract the RF_ID.
* This is relevant only for family 9000 and up.
*/
- if (iwl_trans->trans_cfg->rf_id &&
- iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
- !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && map_crf_id(iwl_trans)) {
+ if (iwl_trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
+ !CSR_HW_RFID_TYPE(info.hw_rf_id) && map_crf_id(iwl_trans, &info)) {
ret = -EINVAL;
goto out_free_trans;
}
IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
pdev->device, pdev->subsystem_device,
- iwl_trans->hw_rev, iwl_trans->hw_rf_id);
+ info.hw_rev, info.hw_rf_id);
dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
- CSR_HW_REV_TYPE(iwl_trans->hw_rev),
- iwl_trans->hw_rev_step,
- CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id),
- CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id),
- CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id),
+ CSR_HW_RFID_TYPE(info.hw_rf_id),
+ CSR_HW_RFID_IS_CDB(info.hw_rf_id),
IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
- IWL_SUBDEVICE_NO_160(pdev->subsystem_device),
- IWL_SUBDEVICE_CORES(pdev->subsystem_device),
- CSR_HW_RFID_STEP(iwl_trans->hw_rf_id));
+ IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device),
+ CSR_HW_RFID_STEP(info.hw_rf_id));
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
- iwl_trans->name = dev_info->name;
- iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
+ info.name = dev_info->name;
}
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -1583,82 +1386,41 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* all the parameters that the transport uses must, until that is
* changed, be identical to the ones in the 7265D configuration.
*/
- if (cfg == &iwl7265_2ac_cfg)
- cfg_7265d = &iwl7265d_2ac_cfg;
- else if (cfg == &iwl7265_2n_cfg)
- cfg_7265d = &iwl7265d_2n_cfg;
- else if (cfg == &iwl7265_n_cfg)
- cfg_7265d = &iwl7265d_n_cfg;
- if (cfg_7265d &&
- (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
- iwl_trans->cfg = cfg_7265d;
-
- /*
- * This is a hack to switch from Qu B0 to Qu C0. We need to
- * do this for all cfgs that use Qu B0, except for those using
- * Jf, which have already been moved to the new table. The
- * rest must be removed once we convert Qu with Hr as well.
- */
- if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
- if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
- else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
- iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
- else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
- iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
- }
-
- /* same thing for QuZ... */
- if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
- iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
- else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
- iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr;
- else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
- iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr;
- }
-
+ if (iwl_trans->cfg == &iwl7265_cfg &&
+ (info.hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+ iwl_trans->cfg = &iwl7265d_cfg;
#endif
- /*
- * If we didn't set the cfg yet, the PCI ID table entry should have
- * been a full config - if yes, use it, otherwise fail.
- */
if (!iwl_trans->cfg) {
- if (ent->driver_data & TRANS_CFG_MARKER) {
- pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
- pdev->device, pdev->subsystem_device,
- iwl_trans->hw_rev, iwl_trans->hw_rf_id);
- ret = -EINVAL;
- goto out_free_trans;
- }
- iwl_trans->cfg = cfg;
+ pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
+ pdev->device, pdev->subsystem_device,
+ info.hw_rev, info.hw_rf_id);
+ ret = -EINVAL;
+ goto out_free_trans;
}
- /* if we don't have a name yet, copy name from the old cfg */
- if (!iwl_trans->name)
- iwl_trans->name = iwl_trans->cfg->name;
+ IWL_INFO(iwl_trans, "Detected %s\n", info.name);
- IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name);
-
- if (iwl_trans->trans_cfg->mq_rx_supported) {
+ if (iwl_trans->mac_cfg->mq_rx_supported) {
if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
ret = -EINVAL;
goto out_free_trans;
}
- trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds;
+ trans_pcie->num_rx_bufs = iwl_trans_get_num_rbds(iwl_trans);
} else {
trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
}
- if (!iwl_trans->trans_cfg->integrated) {
+ if (!iwl_trans->mac_cfg->integrated) {
u16 link_status;
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
- iwl_trans->pcie_link_speed =
+ info.pcie_link_speed =
u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS);
}
+ iwl_trans_set_info(iwl_trans, &info);
+
ret = iwl_trans_init(iwl_trans);
if (ret)
goto out_free_trans;
@@ -1690,11 +1452,12 @@ out_free_trans:
static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans)
return;
- cancel_delayed_work_sync(&trans->me_recheck_wk);
+ cancel_delayed_work_sync(&trans_pcie->me_recheck_wk);
iwl_drv_stop(trans->drv);
@@ -1738,27 +1501,11 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* Scratch value was altered; this means the device was powered off and
* we need to reset it completely.
* Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
- * but not bits [15:8]. So if we have bits set in lower word, assume
- * the device is alive.
- * For older devices, just try silently to grab the NIC.
+ * so assume that any set bits mean the device is usable.
*/
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) &
- CSR_FUNC_SCRATCH_POWER_OFF_MASK))
- device_was_powered_off = true;
- } else {
- /*
- * bh are re-enabled by iwl_trans_pcie_release_nic_access,
- * so re-enable them if _iwl_trans_pcie_grab_nic_access fails.
- */
- local_bh_disable();
- if (_iwl_trans_pcie_grab_nic_access(trans, true)) {
- iwl_trans_pcie_release_nic_access(trans);
- } else {
- device_was_powered_off = true;
- local_bh_enable();
- }
- }
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
+ !iwl_read32(trans, CSR_FUNC_SCRATCH))
+ device_was_powered_off = true;
if (restore || device_was_powered_off) {
trans->state = IWL_TRANS_NO_FW;
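The simplified resume check above reduces to "any nonzero scratch value
means the device kept power"; a sketch, assuming only the semantics the
comment states:

	#include <stdbool.h>
	#include <stdint.h>

	/* MAC bits 0..7 clear on suspend, so any surviving set bit
	 * implies the function was not powered off */
	static bool device_kept_power(uint32_t func_scratch)
	{
		return func_scratch != 0;
	}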
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 114a9195ad7f..3b7c12fc4f9e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -194,7 +194,7 @@ struct iwl_rb_allocator {
static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
__le16 *rb_stts = rxq->rb_stts;
return le16_to_cpu(READ_ONCE(*rb_stts));
@@ -269,6 +269,7 @@ enum iwl_pcie_fw_reset_state {
FW_RESET_REQUESTED,
FW_RESET_OK,
FW_RESET_ERROR,
+ FW_RESET_TOP_REQUESTED,
};
/**
@@ -288,22 +289,14 @@ enum iwl_pcie_imr_status {
/**
* struct iwl_pcie_txqs - TX queues data
*
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
- * @page_offs: offset from skb->cb to mac header page pointer
- * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
* @queue_used: bit mask of used queues
* @queue_stopped: bit mask of stopped queues
* @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
- * @queue_alloc_cmd_ver: queue allocation command version
* @bc_pool: bytecount DMA allocations pool
* @bc_tbl_size: bytecount table size
* @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
* (and similar usage)
- * @cmd: command queue data
- * @cmd.fifo: FIFO number
- * @cmd.q_id: queue ID
- * @cmd.wdg_timeout: watchdog timeout
* @tfd: TFD data
* @tfd.max_tbs: max number of buffers per TFD
* @tfd.size: TFD size
@@ -315,26 +308,15 @@ struct iwl_pcie_txqs {
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
struct dma_pool *bc_pool;
size_t bc_tbl_size;
- bool bc_table_dword;
- u8 page_offs;
- u8 dev_cmd_offs;
struct iwl_tso_hdr_page __percpu *tso_hdr_page;
struct {
- u8 fifo;
- u8 q_id;
- unsigned int wdg_timeout;
- } cmd;
-
- struct {
u8 max_tbs;
u16 size;
u8 addr_size;
} tfd;
struct iwl_dma_ptr scd_bc_tbls;
-
- u8 queue_alloc_cmd_ver;
};
/**
@@ -344,7 +326,7 @@ struct iwl_pcie_txqs {
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @ctxt_info: context information for FW self init
- * @ctxt_info_gen3: context information for gen3 devices
+ * @ctxt_info_v2: context information for v2 devices
* @prph_info: prph info for self init
* @prph_scratch: prph scratch for self init
* @ctxt_info_dma_addr: dma addr of context information
@@ -352,6 +334,7 @@ struct iwl_pcie_txqs {
* @prph_scratch_dma_addr: dma addr of prph scratch
* @iml: image loader image virtual address
+ * @iml_len: image loader image size
* @iml_dma_addr: image loader image DMA address
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
@@ -363,9 +346,6 @@ struct iwl_pcie_txqs {
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
- * @cmd_queue - command queue number
- * @rx_buf_size: Rx buffer size
- * @scd_set_active: should the transport configure the SCD for HCMD queue
* @rx_page_order: page order for receive buffer size
* @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
@@ -406,19 +386,20 @@ struct iwl_pcie_txqs {
* @pcie_dbg_dumped_once: indicates PCIe regs were dumped already
* @opmode_down: indicates opmode went away
* @num_rx_bufs: number of RX buffers to allocate/use
- * @no_reclaim_cmds: special commands not using reclaim flow
- * (firmware workaround)
- * @n_no_reclaim_cmds: number of special commands not using reclaim flow
* @affinity_mask: IRQ affinity mask for each RX queue
* @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
* enable/disable
- * @fw_reset_handshake: indicates FW reset handshake is needed
* @fw_reset_state: state of FW reset handshake
* @fw_reset_waitq: waitqueue for FW reset handshake
* @is_down: indicates the NIC is down
* @isr_stats: interrupt statistics
* @napi_dev: (fake) netdev for NAPI registration
* @txqs: transport tx queues data.
+ * @me_present: WiAMT/CSME is detected as present (1), not present (0)
+ * or unknown (-1, so it can still be used as a boolean safely)
+ * @me_recheck_wk: worker to recheck WiAMT/CSME presence
+ * @invalid_tx_cmd: invalid TX command buffer
+ * @wait_command_queue: wait queue for sync commands
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -427,11 +408,12 @@ struct iwl_trans_pcie {
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
- struct iwl_context_info_gen3 *ctxt_info_gen3;
+ struct iwl_context_info_v2 *ctxt_info_v2;
};
struct iwl_prph_info *prph_info;
struct iwl_prph_scratch *prph_scratch;
void *iml;
+ size_t iml_len;
dma_addr_t ctxt_info_dma_addr;
dma_addr_t prph_info_dma_addr;
dma_addr_t prph_scratch_dma_addr;
@@ -470,12 +452,8 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t sx_waitq;
- u8 n_no_reclaim_cmds;
- u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u16 num_rx_bufs;
- enum iwl_amsdu_size rx_buf_size;
- bool scd_set_active;
bool pcie_dbg_dumped_once;
u32 rx_page_order;
u32 rx_buf_bytes;
@@ -510,7 +488,6 @@ struct iwl_trans_pcie {
void *base_rb_stts;
dma_addr_t base_rb_stts_dma;
- bool fw_reset_handshake;
enum iwl_pcie_fw_reset_state fw_reset_state;
wait_queue_head_t fw_reset_waitq;
enum iwl_pcie_imr_status imr_status;
@@ -518,6 +495,13 @@ struct iwl_trans_pcie {
char rf_name[32];
struct iwl_pcie_txqs txqs;
+
+ s8 me_present;
+ struct delayed_work me_recheck_wk;
+
+ struct iwl_dma_ptr invalid_tx_cmd;
+
+ wait_queue_head_t wait_command_queue;
};
static inline struct iwl_trans_pcie *
@@ -552,8 +536,8 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
*/
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct iwl_cfg_trans_params *cfg_trans);
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info);
void iwl_trans_pcie_free(struct iwl_trans *trans);
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
struct device *dev);
@@ -623,7 +607,7 @@ struct iwl_tso_page_info {
IWL_TSO_PAGE_DATA_SIZE))
int iwl_pcie_tx_init(struct iwl_trans *trans);
-void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
+void iwl_pcie_tx_start(struct iwl_trans *trans);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
@@ -679,7 +663,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
idx = iwl_txq_get_cmd_index(txq, idx);
return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
@@ -718,7 +702,7 @@ static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
return ++index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
}
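The wrap relies on max_tfd_queue_size being a power of two, so the AND
replaces a modulo; e.g. with a queue size of 256, index 255 wraps to 0.
A standalone sketch of the same arithmetic:

	#include <stdint.h>

	static uint32_t txq_inc_wrap(uint32_t index, uint32_t queue_size)
	{
		/* queue_size must be a power of two for the mask to work */
		return (index + 1) & (queue_size - 1);
	}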
/**
@@ -729,7 +713,7 @@ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
return --index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
}
void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
@@ -752,10 +736,12 @@ int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
struct iwl_tfh_tfd *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
tfd->num_tbs = 0;
- iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
- trans->invalid_tx_cmd.size);
+ iwl_txq_gen2_set_tb(trans, tfd, trans_pcie->invalid_tx_cmd.dma,
+ trans_pcie->invalid_tx_cmd.size);
}
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
@@ -782,7 +768,7 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
struct iwl_tfd *tfd;
struct iwl_tfd_tb *tb;
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
struct iwl_tfh_tfd *tfh_tfd = _tfd;
struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
@@ -940,11 +926,13 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
-static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
+static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans,
+ bool top_reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");
+ IWL_DEBUG_ISR(trans, "Enabling %s interrupt only\n",
+ top_reset ? "RESET" : "ALIVE");
if (!trans_pcie->msix_enabled) {
/*
@@ -954,11 +942,20 @@ static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
* RX interrupt which will allow us to receive the ALIVE
* notification (which is Rx) and continue the flow.
*/
- trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
+ if (top_reset)
+ trans_pcie->inta_mask = CSR_INT_BIT_RESET_DONE;
+ else
+ trans_pcie->inta_mask = CSR_INT_BIT_ALIVE |
+ CSR_INT_BIT_FH_RX;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
} else {
- iwl_enable_hw_int_msk_msix(trans,
- MSIX_HW_INT_CAUSES_REG_ALIVE);
+ u32 val = top_reset ? MSIX_HW_INT_CAUSES_REG_RESET_DONE
+ : MSIX_HW_INT_CAUSES_REG_ALIVE;
+
+ iwl_enable_hw_int_msk_msix(trans, val);
+
+ if (top_reset)
+ return;
/*
* Leave all the FH causes enabled to get the ALIVE
* notification.
@@ -1005,7 +1002,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
MSIX_HW_INT_CAUSES_REG_RF_KILL);
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
/*
* On 9000-series devices this bit isn't enabled by default, so
* when we power down the device we need set the bit to allow it
@@ -1076,8 +1073,7 @@ static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common trans ops for all generations transports */
-void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg);
+void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
@@ -1107,11 +1103,14 @@ int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
void __releases(nic_access_nobh)
iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
/* transport gen 1 exported functions */
-void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill);
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill);
void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
/* common functions that are used by gen2 transport */
@@ -1129,15 +1128,12 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
-/* common functions that are used by gen3 transport */
-void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
-
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill);
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
-int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4a4f8de4efe2..f0405eddc367 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -12,7 +12,8 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
+#include "fw/dbg.h"
/******************************************************************************
*
@@ -143,12 +144,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
/* TODO: remove this once fw does it */
- iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
- return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+ iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_AX210, 0);
+ return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_AX210,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
- } else if (trans->trans_cfg->mq_rx_supported) {
+ } else if (trans->mac_cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -175,7 +176,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
* 1. shadow registers aren't enabled
* 2. there is a chance that the NIC is asleep
*/
- if (!trans->trans_cfg->base_params->shadow_reg_enable &&
+ if (!trans->mac_cfg->base->shadow_reg_enable &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
@@ -190,9 +191,9 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
- if (!trans->trans_cfg->mq_rx_supported)
+ if (!trans->mac_cfg->mq_rx_supported)
iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
HBUS_TARG_WRPTR_RX_Q(rxq->id));
else
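
A sketch of the write-pointer rounding and register selection in this hunk, modelled in plain C with register writes replaced by printf; the family split mirrors the legacy/MQ/BZ branches above, but the register names are only labels here.

#include <stdio.h>

#define ROUND_DOWN(x, y) ((x) & ~((y) - 1))

enum family { FAM_LEGACY, FAM_MQ, FAM_BZ };

static void rxq_inc_wr_ptr(enum family fam, int qid, unsigned int write)
{
	unsigned int write_actual = ROUND_DOWN(write, 8u); /* 8-RBD steps */

	if (fam == FAM_LEGACY)
		printf("FH_RSCSR_CHNL0_WPTR <- %u\n", write_actual);
	else if (fam == FAM_BZ)
		printf("HBUS_TARG_WRPTR <- %u (queue %d)\n", write_actual, qid);
	else
		printf("RFH queue %d widx <- %u\n", qid, write_actual);
}

int main(void)
{
	rxq_inc_wr_ptr(FAM_MQ, 0, 13); /* rounds down to 8 */
	return 0;
}
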
@@ -205,7 +206,7 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
if (!rxq->need_update)
@@ -221,7 +222,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
@@ -348,7 +349,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
iwl_pcie_rxmq_restock(trans, rxq);
else
iwl_pcie_rxsq_restock(trans, rxq);
@@ -362,8 +363,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
u32 *offset, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
+ unsigned int rbsize = trans_pcie->rx_buf_bytes;
struct page *page;
gfp_t gfp_mask = priority;
@@ -657,19 +658,19 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return sizeof(struct iwl_rx_transfer_desc);
- return trans->trans_cfg->mq_rx_supported ?
+ return trans->mac_cfg->mq_rx_supported ?
sizeof(__le64) : sizeof(__le32);
}
static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
return sizeof(struct iwl_rx_completion_desc_bz);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return sizeof(struct iwl_rx_completion_desc);
return sizeof(__le32);
@@ -701,7 +702,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
{
- bool use_rx_td = (trans->trans_cfg->device_family >=
+ bool use_rx_td = (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210);
if (use_rx_td)
@@ -720,8 +721,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
int free_size;
spin_lock_init(&rxq->lock);
- if (trans->trans_cfg->mq_rx_supported)
- rxq->queue_size = trans->cfg->num_rbds;
+ if (trans->mac_cfg->mq_rx_supported)
+ rxq->queue_size = iwl_trans_get_num_rbds(trans);
else
rxq->queue_size = RX_QUEUE_SIZE;
@@ -736,7 +737,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
if (!rxq->bd)
goto err;
- if (trans->trans_cfg->mq_rx_supported) {
+ if (trans->mac_cfg->mq_rx_supported) {
rxq->used_bd = dma_alloc_coherent(dev,
iwl_pcie_used_bd_size(trans) *
rxq->queue_size,
@@ -753,7 +754,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
return 0;
err:
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
iwl_pcie_free_rxq_dma(trans, rxq);
@@ -772,7 +773,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
if (WARN_ON(trans_pcie->rxq))
return -EINVAL;
- trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+ trans_pcie->rxq = kcalloc(trans->info.num_rxqs, sizeof(struct iwl_rxq),
GFP_KERNEL);
trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
sizeof(trans_pcie->rx_pool[0]),
@@ -795,7 +796,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
*/
trans_pcie->base_rb_stts =
dma_alloc_coherent(trans->dev,
- rb_stts_size * trans->num_rx_queues,
+ rb_stts_size * trans->info.num_rxqs,
&trans_pcie->base_rb_stts_dma,
GFP_KERNEL);
if (!trans_pcie->base_rb_stts) {
@@ -803,7 +804,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
goto err;
}
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
rxq->id = i;
@@ -816,7 +817,7 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
err:
if (trans_pcie->base_rb_stts) {
dma_free_coherent(trans->dev,
- rb_stts_size * trans->num_rx_queues,
+ rb_stts_size * trans->info.num_rxqs,
trans_pcie->base_rb_stts,
trans_pcie->base_rb_stts_dma);
trans_pcie->base_rb_stts = NULL;
@@ -834,11 +835,10 @@ err:
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_4K:
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
break;
@@ -906,7 +906,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
u32 rb_size, enabled = 0;
int i;
- switch (trans_pcie->rx_buf_size) {
+ switch (trans->conf.rx_buf_size) {
case IWL_AMSDU_2K:
rb_size = RFH_RXF_DMA_RB_SIZE_2K;
break;
@@ -932,7 +932,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
/* disable free and used rx queue operation */
iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
/* Tell device where to find RBD free table in DRAM */
iwl_write_prph64_no_grab(trans,
RFH_Q_FRBDCB_BA_LSB(i),
@@ -976,7 +976,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
RFH_GEN_CFG_SERVICE_DMA_SNOOP |
RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
- trans->trans_cfg->integrated ?
+ trans->mac_cfg->integrated ?
RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
RFH_GEN_CFG_RB_CHUNK_SIZE_128));
/* Enable the relevant rx queues */
@@ -1072,7 +1072,7 @@ void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
if (unlikely(!trans_pcie->rxq))
return;
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
if (rxq && rxq->napi.poll)
@@ -1109,7 +1109,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
for (i = 0; i < RX_QUEUE_SIZE; i++)
def_rxq->queue[i] = NULL;
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
spin_lock_bh(&rxq->lock);
@@ -1122,7 +1122,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->write = 0;
rxq->write_actual = 0;
memset(rxq->rb_stts, 0,
- (trans->trans_cfg->device_family >=
+ (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_AX210) ?
sizeof(__le16) : sizeof(struct iwl_rb_status));
@@ -1144,9 +1144,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
}
/* move the pool to the default queue and allocator ownerships */
- queue_size = trans->trans_cfg->mq_rx_supported ?
+ queue_size = trans->mac_cfg->mq_rx_supported ?
trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
- allocator_pool_size = trans->num_rx_queues *
+ allocator_pool_size = trans->info.num_rxqs *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
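
A worked example of the buffer-ownership split computed above. The claim/post constants and the 256-entry legacy queue mirror the driver's defaults, but the MQ pool size and queue count are picked by hand for illustration.

#include <stdio.h>

#define RX_QUEUE_SIZE      256
#define RX_CLAIM_REQ_ALLOC 8
#define RX_POST_REQ_ALLOC  2

int main(void)
{
	int mq_rx_supported = 1;
	int num_rx_bufs = 4096; /* assumed MQ pool size */
	int num_rxqs = 4;

	int queue_size = mq_rx_supported ? num_rx_bufs - 1 : RX_QUEUE_SIZE;
	int allocator_pool_size =
		num_rxqs * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);

	printf("default queue: %d, allocator: %d, total: %d\n",
	       queue_size, allocator_pool_size,
	       queue_size + allocator_pool_size);
	return 0;
}
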
@@ -1175,7 +1175,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (ret)
return ret;
- if (trans->trans_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported)
iwl_pcie_rx_mq_hw_init(trans);
else
iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
@@ -1223,14 +1223,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
if (trans_pcie->base_rb_stts) {
dma_free_coherent(trans->dev,
- rb_stts_size * trans->num_rx_queues,
+ rb_stts_size * trans->info.num_rxqs,
trans_pcie->base_rb_stts,
trans_pcie->base_rb_stts_dma);
trans_pcie->base_rb_stts = NULL;
trans_pcie->base_rb_stts_dma = 0;
}
- for (i = 0; i < trans->num_rx_queues; i++) {
+ for (i = 0; i < trans->info.num_rxqs; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
iwl_pcie_free_rxq_dma(trans, rxq);
@@ -1301,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@@ -1368,8 +1368,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
if (reclaim && !pkt->hdr.group_id) {
int i;
- for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
- if (trans_pcie->no_reclaim_cmds[i] ==
+ for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) {
+ if (trans->conf.no_reclaim_cmds[i] ==
pkt->hdr.cmd) {
reclaim = false;
break;
@@ -1408,7 +1408,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
break;
}
@@ -1454,18 +1454,18 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
- if (!trans->trans_cfg->mq_rx_supported) {
+ if (!trans->mac_cfg->mq_rx_supported) {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
return rxb;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
vid = le16_to_cpu(cd[i].rbid);
*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
- } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_completion_desc *cd = rxq->used_bd;
vid = le16_to_cpu(cd[i].rbid);
@@ -1648,7 +1648,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
- if (WARN_ON(entry->entry >= trans->num_rx_queues))
+ if (WARN_ON(entry->entry >= trans->info.num_rxqs))
return IRQ_NONE;
if (!trans_pcie->rxq) {
@@ -1683,18 +1683,18 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
- !trans->cfg->apmg_not_supported &&
+ !trans->mac_cfg->base->apmg_not_supported &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
iwl_op_mode_wimax_active(trans->op_mode);
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
return;
}
- for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
if (!trans_pcie->txqs.txq[i])
continue;
timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer);
@@ -1705,7 +1705,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
@@ -1820,7 +1820,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
&trans->status))
IWL_DEBUG_RF_KILL(trans,
"Rfkill while SYNC HCMD in flight\n");
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
} else {
clear_bit(STATUS_RFKILL_HW, &trans->status);
if (trans_pcie->opmode_down)
@@ -1828,6 +1828,54 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
}
}
+static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 state;
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
+ u32 val = iwl_read32(trans, CSR_IPC_STATE);
+
+ state = u32_get_bits(val, CSR_IPC_STATE_RESET);
+ IWL_DEBUG_ISR(trans, "IPC state = 0x%x/%d\n", val, state);
+ } else {
+ state = CSR_IPC_STATE_RESET_SW_READY;
+ }
+
+ switch (state) {
+ case CSR_IPC_STATE_RESET_SW_READY:
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "Reset flow completed\n");
+ trans_pcie->fw_reset_state = FW_RESET_OK;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ break;
+ }
+ fallthrough;
+ case CSR_IPC_STATE_RESET_TOP_READY:
+ /* FIXME: handle this case when requesting TOP reset */
+ fallthrough;
+ case CSR_IPC_STATE_RESET_NONE:
+ IWL_FW_CHECK_FAILED(trans,
+ "Invalid reset interrupt (state=%d)!\n",
+ state);
+ break;
+ case CSR_IPC_STATE_RESET_TOP_FOLLOWER:
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ /* if we were in reset, wake that up */
+ IWL_INFO(trans,
+ "TOP reset from BT while doing reset\n");
+ trans_pcie->fw_reset_state = FW_RESET_OK;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ } else {
+ IWL_INFO(trans, "TOP reset from BT\n");
+ trans->state = IWL_TRANS_NO_FW;
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_RESET_BY_BT);
+ }
+ break;
+ }
+}
+
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
struct iwl_trans *trans = dev_id;
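
A standalone model of the CSR_IPC_STATE decoding in the new reset interrupt handler: extract a bitfield FIELD_GET()-style, then dispatch on the reset state. The mask placement and state numbering are illustrative, not the hardware's real layout.

#include <stdint.h>
#include <stdio.h>

#define IPC_STATE_RESET_MASK 0x000000C0u /* example field placement */

enum reset_state {
	RESET_NONE,
	RESET_SW_READY,
	RESET_TOP_READY,
	RESET_TOP_FOLLOWER,
};

/* shift the masked field down by its least-significant set bit */
static uint32_t get_bits(uint32_t val, uint32_t mask)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t csr = 0x40; /* bits 7:6 = 0b01 -> SW_READY */

	switch ((enum reset_state)get_bits(csr, IPC_STATE_RESET_MASK)) {
	case RESET_SW_READY:
		printf("reset flow completed\n");
		break;
	case RESET_TOP_FOLLOWER:
		printf("TOP reset initiated by BT\n");
		break;
	default:
		printf("unexpected reset state\n");
		break;
	}
	return 0;
}
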
@@ -1936,7 +1984,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
if (inta & CSR_INT_BIT_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
/*
* We can restock, since firmware configured
* the RFH
@@ -1947,6 +1995,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
handled |= CSR_INT_BIT_ALIVE;
}
+ if (inta & CSR_INT_BIT_RESET_DONE) {
+ iwl_trans_pcie_handle_reset_interrupt(trans);
+ handled |= CSR_INT_BIT_RESET_DONE;
+ }
+
/* Safely ignore these bits for debug checks below */
inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
@@ -1968,7 +2021,12 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
IWL_ERR(trans, "Microcode SW error detected. "
" Restarting 0x%X.\n", inta);
isr_stats->sw++;
- iwl_pcie_irq_handle_error(trans);
+ if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+ trans_pcie->fw_reset_state = FW_RESET_ERROR;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ } else {
+ iwl_pcie_irq_handle_error(trans);
+ }
handled |= CSR_INT_BIT_SW_ERR;
}
@@ -2074,7 +2132,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
iwl_enable_rfkill_int(trans);
/* Re-enable the ALIVE / Rx interrupt if it occurred */
else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
- iwl_enable_fw_load_int_ctx_info(trans);
+ iwl_enable_fw_load_int_ctx_info(trans, false);
spin_unlock_bh(&trans_pcie->irq_lock);
}
@@ -2289,7 +2347,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
else
sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
@@ -2297,9 +2355,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
inta_hw);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_trans_pcie_reset(trans,
- IWL_RESET_MODE_PROD_RESET);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ trans->request_top_reset = 1;
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_TOP_FATAL_ERROR);
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_FATAL_ERROR);
+ }
}
/* Error detected by uCode */
@@ -2338,7 +2400,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
/* We can restock, since firmware configured the RFH */
iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
}
@@ -2388,11 +2450,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
iwl_pcie_irq_handle_error(trans);
}
- if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
- IWL_DEBUG_ISR(trans, "Reset flow completed\n");
- trans_pcie->fw_reset_state = FW_RESET_OK;
- wake_up(&trans_pcie->fw_reset_waitq);
- }
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)
+ iwl_trans_pcie_handle_reset_interrupt(trans);
if (!polling)
iwl_pcie_clear_irq(trans, entry->entry);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 472f26f83ba8..38ad719161e6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -6,7 +6,7 @@
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
#include "internal.h"
#include "fw/dbg.h"
@@ -81,13 +81,13 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
- iwl_trans_sw_reset(trans, false);
+ iwl_trans_pcie_sw_reset(trans, false);
/*
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
else
@@ -102,10 +102,10 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
trans_pcie->fw_reset_state = FW_RESET_REQUESTED;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
- else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
+ else if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
else
@@ -117,13 +117,23 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
trans_pcie->fw_reset_state != FW_RESET_REQUESTED,
FW_RESET_TIMEOUT);
if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) {
- u32 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ bool reset_done;
+ u32 inta_hw;
+
+ if (trans_pcie->msix_enabled) {
+ inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ reset_done =
+ inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE;
+ } else {
+ inta_hw = iwl_read32(trans, CSR_INT_MASK);
+ reset_done = inta_hw & CSR_INT_BIT_RESET_DONE;
+ }
IWL_ERR(trans,
- "timeout waiting for FW reset ACK (inta_hw=0x%x)\n",
- inta_hw);
+ "timeout waiting for FW reset ACK (inta_hw=0x%x, reset_done %d)\n",
+ inta_hw, reset_done);
- if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) {
+ if (!reset_done) {
struct iwl_fw_error_dump_mode mode = {
.type = IWL_ERR_TYPE_RESET_HS_TIMEOUT,
.context = IWL_ERR_CONTEXT_FROM_OPMODE,
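
A model of the timeout path above: which register and which bit indicate "reset done" depend on whether MSI-X is enabled. The register read is stubbed and the bit values are placeholders.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSIX_HW_RESET_DONE (1u << 28) /* placeholder */
#define CSR_INT_RESET_DONE (1u << 27) /* placeholder */

static uint32_t read32(const char *reg) { (void)reg; return 0; }

static bool fw_reset_done(bool msix_enabled)
{
	uint32_t inta_hw;

	if (msix_enabled) {
		inta_hw = read32("CSR_MSIX_HW_INT_CAUSES_AD");
		return inta_hw & MSIX_HW_RESET_DONE;
	}
	inta_hw = read32("CSR_INT_MASK");
	return inta_hw & CSR_INT_RESET_DONE;
}

int main(void)
{
	printf("reset done: %d\n", fw_reset_done(true));
	return 0;
}
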
@@ -147,7 +157,7 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
return;
if (trans->state >= IWL_TRANS_FW_STARTED &&
- trans_pcie->fw_reset_handshake) {
+ trans->conf.fw_reset_handshake) {
/*
* Reset handshake can dump firmware on timeout, but that
* should assume that the firmware is already dead.
@@ -181,8 +191,8 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
}
iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_gen3_free(trans, false);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_pcie_ctxt_info_v2_free(trans, false);
else
iwl_pcie_ctxt_info_free(trans);
@@ -190,7 +200,7 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
iwl_pcie_gen2_apm_stop(trans, false);
/* re-take ownership to prevent other users from stealing the device */
- iwl_trans_sw_reset(trans, true);
+ iwl_trans_pcie_sw_reset(trans, true);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@@ -243,7 +253,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->cfg->min_txq_size);
+ trans->mac_cfg->base->min_txq_size);
int ret;
/* TODO: most of the logic can be removed in A0 - but not in Z0 */
@@ -260,7 +270,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size))
+ if (iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@@ -281,7 +291,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
if (buf[0])
return;
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
pos = scnprintf(buf, buflen, "JF");
break;
@@ -305,7 +315,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
break;
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP):
if (SILICON_Z_STEP ==
- CSR_HW_RFID_STEP(trans->hw_rf_id))
+ CSR_HW_RFID_STEP(trans->info.hw_rf_id))
pos = scnprintf(buf, buflen, "WHTC");
else
pos = scnprintf(buf, buflen, "WH");
@@ -314,7 +324,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
return;
}
- switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
@@ -337,7 +347,7 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
}
pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
- trans->hw_rf_id);
+ trans->info.hw_rf_id);
IWL_INFO(trans, "Detected RF %s\n", buf);
@@ -364,8 +374,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed, though, since the FW will still use it
*/
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- iwl_pcie_ctxt_info_gen3_free(trans, true);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_pcie_ctxt_info_v2_free(trans, true);
else
iwl_pcie_ctxt_info_free(trans);
@@ -379,6 +389,11 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
iwl_pcie_get_rf_name(trans);
mutex_unlock(&trans_pcie->mutex);
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans->step_urm = !!(iwl_read_umac_prph(trans,
+ CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
}
static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
@@ -398,21 +413,21 @@ static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
* initialize the LTR to ~250 usec (see ltr_val above).
* The firmware initializes this again later (to a smaller value).
*/
- if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
- !trans->trans_cfg->integrated) {
+ if ((trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+ trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+ !trans->mac_cfg->integrated) {
iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
return true;
}
- if (trans->trans_cfg->integrated &&
- trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+ if (trans->mac_cfg->integrated &&
+ trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
return true;
}
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
/* First clear the interrupt, just in case */
iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
MSIX_HW_INT_CAUSES_REG_IML);
@@ -469,16 +484,22 @@ static void iwl_pcie_spin_for_iml(struct iwl_trans *trans)
}
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill)
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill, keep_ram_busy;
+ bool top_reset_done = false;
int ret;
+ mutex_lock(&trans_pcie->mutex);
+again:
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- return -EIO;
+ ret = -EIO;
+ goto out;
}
iwl_enable_rfkill_int(trans);
@@ -495,8 +516,6 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
/* Make sure it finished running */
iwl_pcie_synchronize_irqs(trans);
- mutex_lock(&trans_pcie->mutex);
-
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill) {
@@ -526,22 +545,37 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
- else
- ret = iwl_pcie_ctxt_info_init(trans, fw);
- if (ret)
- goto out;
+ if (WARN_ON(trans->do_top_reset &&
+ trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC))
+ return -EINVAL;
+
+ /* we need to wait later - set state */
+ if (trans->do_top_reset)
+ trans_pcie->fw_reset_state = FW_RESET_TOP_REQUESTED;
+
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (!top_reset_done) {
+ ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img);
+ if (ret)
+ goto out;
+ }
+
+ iwl_pcie_ctxt_info_v2_kick(trans);
+ } else {
+ ret = iwl_pcie_ctxt_info_init(trans, img);
+ if (ret)
+ goto out;
+ }
keep_ram_busy = !iwl_pcie_set_ltr(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n",
iwl_read32(trans, CSR_FUNC_SCRATCH));
iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_ROM_START);
- } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
} else {
iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
@@ -550,6 +584,38 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
if (keep_ram_busy)
iwl_pcie_spin_for_iml(trans);
+ if (trans->do_top_reset) {
+ trans->do_top_reset = 0;
+
+#define FW_TOP_RESET_TIMEOUT (HZ / 4)
+ ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
+ trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED,
+ FW_TOP_RESET_TIMEOUT);
+
+ if (trans_pcie->fw_reset_state != FW_RESET_OK) {
+ if (trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED)
+ IWL_ERR(trans,
+ "TOP reset interrupted by error (state %d)!\n",
+ trans_pcie->fw_reset_state);
+ else
+ IWL_ERR(trans, "TOP reset timed out!\n");
+ iwl_op_mode_nic_error(trans->op_mode,
+ IWL_ERR_TYPE_TOP_RESET_FAILED);
+ iwl_trans_schedule_reset(trans,
+ IWL_ERR_TYPE_TOP_RESET_FAILED);
+ ret = -EIO;
+ goto out;
+ }
+
+ msleep(10);
+ IWL_INFO(trans, "TOP reset successful, reinit now\n");
+ /* now load the firmware again properly */
+ trans_pcie->prph_scratch->ctrl_cfg.control.control_flags &=
+ ~cpu_to_le32(IWL_PRPH_SCRATCH_TOP_RESET);
+ top_reset_done = true;
+ goto again;
+ }
+
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
if (hw_rfkill && !run_in_rfkill)
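
A condensed model of the TOP-reset retry added to gen2 start_fw above: request the reset, wait for the interrupt handler to flip the state, and on success clear the scratch flag and run the load sequence again, at most once. Waiting and hardware access are simulated.

#include <stdbool.h>
#include <stdio.h>

enum fw_reset_state { FW_RESET_IDLE, FW_RESET_TOP_REQUESTED, FW_RESET_OK };

static enum fw_reset_state wait_for_isr(void)
{
	/* stands in for wait_event_timeout() plus the ISR side */
	return FW_RESET_OK;
}

static int start_fw(bool do_top_reset)
{
	bool top_reset_done = false;

again:
	printf("loading firmware image (top_reset_done=%d)\n",
	       top_reset_done);

	if (do_top_reset && !top_reset_done) {
		if (wait_for_isr() != FW_RESET_OK)
			return -1; /* driver schedules a full reset here */
		/* clear the TOP_RESET scratch flag, then reload */
		top_reset_done = true;
		goto again;
	}
	return 0;
}

int main(void)
{
	return start_fw(true);
}
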
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 102a6123bba0..cc4d289b110d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -28,7 +28,7 @@
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
-#include "iwl-context-info-gen3.h"
+#include "iwl-context-info-v2.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
@@ -131,7 +131,7 @@ out:
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_SW_RESET);
usleep_range(10000, 20000);
@@ -237,7 +237,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
- if (trans->cfg->apmg_not_supported)
+ if (trans->mac_cfg->base->apmg_not_supported)
return;
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -293,7 +293,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
@@ -317,7 +317,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
- if (trans->trans_cfg->base_params->pll_cfg)
+ if (trans->mac_cfg->base->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
ret = iwl_finish_nic_init(trans);
@@ -353,7 +353,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* bits do not disable clocks. This preserves any hardware
* bits already set by default in "CLK_CTRL_REG" after reset.
*/
- if (!trans->cfg->apmg_not_supported) {
+ if (!trans->mac_cfg->base->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_EN_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(20);
@@ -469,7 +469,7 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
/* stop device's busmaster DMA activity */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);
@@ -501,10 +501,10 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_pcie_apm_init(trans);
/* inform ME that we are leaving */
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->trans_cfg->device_family >=
+ else if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_8000) {
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -565,7 +565,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
return -ENOMEM;
}
- if (trans->trans_cfg->base_params->shadow_reg_enable) {
+ if (trans->mac_cfg->base->shadow_reg_enable) {
/* enable shadow regs in HW */
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
@@ -634,7 +634,7 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
IWL_DEBUG_INFO(trans,
"Couldn't prepare the card but SAP is connected\n");
trans->csme_own = true;
- if (trans->trans_cfg->device_family !=
+ if (trans->mac_cfg->device_family !=
IWL_DEVICE_FAMILY_9000)
IWL_ERR(trans,
"SAP not supported for this NIC family\n");
@@ -819,7 +819,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
iwl_enable_interrupts(trans);
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
if (cpu == 1)
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
0xFFFF);
@@ -977,7 +977,7 @@ monitor:
if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
fw_mon->physical >> dest->base_shift);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(fw_mon->physical + fw_mon->size -
256) >> dest->end_shift);
@@ -1153,7 +1153,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
*/
iwl_pcie_map_list(trans, causes_list_common,
ARRAY_SIZE(causes_list_common), val);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_pcie_map_list(trans, causes_list_bz,
ARRAY_SIZE(causes_list_bz), val);
else
@@ -1175,7 +1175,7 @@ static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
* the other (N - 2) interrupt vectors.
*/
val = BIT(MSIX_FH_INT_CAUSES_Q(0));
- for (idx = 1; idx < trans->num_rx_queues; idx++) {
+ for (idx = 1; idx < trans->info.num_rxqs; idx++) {
iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
MSIX_FH_INT_CAUSES_Q(idx - offset));
val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
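
A model of the RX cause wiring above: queue 0 stays on the shared vector, queues 1..N-1 map to their own vectors, and val accumulates the enabled-cause bitmap. IVAR writes are printed rather than performed.

#include <stdio.h>

#define FH_CAUSE_Q(q) (1u << (q))

int main(void)
{
	int num_rxqs = 4, offset = 1;
	unsigned int val = FH_CAUSE_Q(0);

	for (int idx = 1; idx < num_rxqs; idx++) {
		printf("CSR_MSIX_RX_IVAR(%d) <- cause %d\n",
		       idx, idx - offset);
		val |= FH_CAUSE_Q(idx);
	}
	printf("enabled causes: 0x%x\n", val);
	return 0;
}
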
@@ -1196,7 +1196,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
- if (trans->trans_cfg->mq_rx_supported &&
+ if (trans->mac_cfg->mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_umac_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
@@ -1271,7 +1271,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
iwl_pcie_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
- if (!trans->cfg->apmg_not_supported) {
+ if (!trans->mac_cfg->base->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
@@ -1279,7 +1279,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
}
/* Make sure (redundant) we've released our request to stay awake */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
@@ -1337,7 +1337,9 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
}
int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill)
+ const struct iwl_fw *fw,
+ const struct fw_img *img,
+ bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
@@ -1408,10 +1410,10 @@ int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
- ret = iwl_pcie_load_given_ucode_8000(trans, fw);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ ret = iwl_pcie_load_given_ucode_8000(trans, img);
else
- ret = iwl_pcie_load_given_ucode(trans, fw);
+ ret = iwl_pcie_load_given_ucode(trans, img);
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
@@ -1423,10 +1425,10 @@ out:
return ret;
}
-void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
iwl_pcie_reset_ict(trans);
- iwl_pcie_tx_start(trans, scd_addr);
+ iwl_pcie_tx_start(trans);
}
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
@@ -1485,7 +1487,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
- !WARN_ON(trans->trans_cfg->gen2))
+ !WARN_ON(trans->mac_cfg->gen2))
_iwl_trans_pcie_stop_device(trans, from_irq);
}
@@ -1505,7 +1507,7 @@ static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -1534,11 +1536,11 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
UREG_DOORBELL_TO_ISR6_RESUME);
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
CSR_IPC_SLEEP_CONTROL_RESUME);
@@ -1593,7 +1595,7 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
goto out;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
@@ -1653,17 +1655,18 @@ out:
static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans *trans,
- const struct iwl_cfg_trans_params *cfg_trans)
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int max_irqs, num_irqs, i, ret;
u16 pci_cmd;
u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
- if (!cfg_trans->mq_rx_supported)
+ if (!mac_cfg->mq_rx_supported)
goto enable_msi;
- if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
+ if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
@@ -1693,27 +1696,28 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
* More than two interrupts: we will use fewer RSS queues.
*/
if (num_irqs <= max_irqs - 2) {
- trans_pcie->trans->num_rx_queues = num_irqs + 1;
+ info->num_rxqs = num_irqs + 1;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
IWL_SHARED_IRQ_FIRST_RSS;
} else if (num_irqs == max_irqs - 1) {
- trans_pcie->trans->num_rx_queues = num_irqs;
+ info->num_rxqs = num_irqs;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
} else {
- trans_pcie->trans->num_rx_queues = num_irqs - 1;
+ info->num_rxqs = num_irqs - 1;
}
IWL_DEBUG_INFO(trans,
"MSI-X enabled with rx queues %d, vec mask 0x%x\n",
- trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);
+ info->num_rxqs, trans_pcie->shared_vec_mask);
- WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
+ WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES);
trans_pcie->alloc_vecs = num_irqs;
trans_pcie->msix_enabled = true;
return;
enable_msi:
+ info->num_rxqs = 1;
ret = pci_enable_msi(pdev);
if (ret) {
dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
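
A worked example of the vector-budget logic above: how many RX queues result from the number of MSI-X vectors actually granted. max_irqs follows the min(num_online_cpus() + 2, max_rx_queues) rule from the patch; the concrete values are picked by hand.

#include <stdio.h>

static int num_rxqs_for(int num_irqs, int max_irqs)
{
	if (num_irqs <= max_irqs - 2)
		return num_irqs + 1; /* shared vector serves non-RX + RSS */
	if (num_irqs == max_irqs - 1)
		return num_irqs;     /* shared vector serves non-RX only */
	return num_irqs - 1;         /* one vector reserved for non-RX */
}

int main(void)
{
	int max_irqs = 6; /* e.g. 4 CPUs + 2, below the HW queue cap */

	for (int granted = 2; granted <= max_irqs; granted++)
		printf("granted %d -> num_rxqs %d\n",
		       granted, num_rxqs_for(granted, max_irqs));
	return 0;
}
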
@@ -1726,14 +1730,15 @@ enable_msi:
}
}
-static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
+static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans,
+ struct iwl_trans_info *info)
{
#if defined(CONFIG_SMP)
int iter_rx_q, i, ret, cpu, offset;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
- iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
+ iter_rx_q = info->num_rxqs - 1 + i;
offset = 1 + i;
for (; i < iter_rx_q ; i++) {
/*
@@ -1753,7 +1758,8 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
- struct iwl_trans_pcie *trans_pcie)
+ struct iwl_trans_pcie *trans_pcie,
+ struct iwl_trans_info *info)
{
int i;
@@ -1782,7 +1788,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
return ret;
}
}
- iwl_pcie_irq_set_affinity(trans_pcie->trans);
+ iwl_pcie_irq_set_affinity(trans_pcie->trans, info);
return 0;
}
@@ -1791,7 +1797,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
u32 hpm, wprot;
- switch (trans->trans_cfg->device_family) {
+ switch (trans->mac_cfg->device_family) {
case IWL_DEVICE_FAMILY_9000:
wprot = PREG_PRPH_WPROT_9000;
break;
@@ -1860,8 +1866,8 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
if (err)
return err;
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
- trans->trans_cfg->integrated) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ trans->mac_cfg->integrated) {
err = iwl_pcie_gen2_force_power_gating(trans);
if (err)
return err;
@@ -1936,7 +1942,7 @@ u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0x00FFFFFF;
else
return 0x000FFFFF;
@@ -1960,45 +1966,17 @@ void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
+void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
- trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
- trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
- trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
- trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
- trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
-
- if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
- trans_pcie->n_no_reclaim_cmds = 0;
- else
- trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
- if (trans_pcie->n_no_reclaim_cmds)
- memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
- trans_pcie->n_no_reclaim_cmds * sizeof(u8));
-
- trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order =
- iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+ iwl_trans_get_rb_size_order(trans->conf.rx_buf_size);
trans_pcie->rx_buf_bytes =
- iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
- trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
-
- trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
- trans_pcie->scd_set_active = trans_cfg->scd_set_active;
-
- trans->command_groups = trans_cfg->command_groups;
- trans->command_groups_size = trans_cfg->command_groups_size;
-
-
- trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
+ iwl_trans_get_rb_size(trans->conf.rx_buf_size);
}
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
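
A sketch of the two derived RX-buffer values computed in op_mode_enter above: the byte size and the page order for each configured buffer size, assuming 4 KiB pages. The order helper approximates get_order(); the AMSDU sizes are the usual 4K/8K/12K settings.

#include <stdio.h>

#define PAGE_SIZE 4096

enum amsdu_size { AMSDU_4K = 4096, AMSDU_8K = 8192, AMSDU_12K = 12288 };

static int rb_size_order(int bytes)
{
	int order = 0;

	while ((PAGE_SIZE << order) < bytes)
		order++;
	return order;
}

int main(void)
{
	enum amsdu_size sizes[] = { AMSDU_4K, AMSDU_8K, AMSDU_12K };

	for (int i = 0; i < 3; i++)
		printf("rx_buf %d bytes -> page order %d (%d-byte alloc)\n",
		       sizes[i], rb_size_order(sizes[i]),
		       PAGE_SIZE << rb_size_order(sizes[i]));
	return 0;
}
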
@@ -2026,11 +2004,14 @@ void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions
static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
{
- iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd);
}
static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_header_wide bad_cmd = {
.cmd = INVALID_WR_PTR_CMD,
.group_id = DEBUG_GROUP,
@@ -2040,11 +2021,11 @@ static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
};
int ret;
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd,
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd,
sizeof(bad_cmd));
if (ret)
return ret;
- memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
+ memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
return 0;
}
@@ -2055,7 +2036,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_txq_gen2_tx_free(trans);
else
iwl_pcie_tx_free(trans);
@@ -2348,6 +2329,7 @@ out:
void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_trans_pcie_removal *removal;
char _msg = 0, *msg = &_msg;
@@ -2358,9 +2340,9 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return;
- if (trans->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
+ if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
mode = IWL_RESET_MODE_FUNC_RESET;
- if (trans->me_present < 0)
+ if (trans_pcie->me_present < 0)
msg = " instead of product reset as ME may be present";
else
msg = " instead of product reset as ME is present";
@@ -2395,7 +2377,7 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
removal->pdev = to_pci_dev(trans->dev);
removal->mode = mode;
- removal->integrated = trans->trans_cfg->integrated;
+ removal->integrated = trans->mac_cfg->integrated;
INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
pci_dev_get(removal->pdev);
schedule_work(&removal->work);
@@ -2423,7 +2405,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
if (trans_pcie->cmd_hold_nic_awake)
goto out;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
@@ -2431,7 +2413,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
/*
@@ -2518,7 +2500,7 @@ iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
if (trans_pcie->cmd_hold_nic_awake)
goto out;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
@@ -2617,7 +2599,7 @@ int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+ if (queue >= trans->info.num_rxqs || !trans_pcie->rxq)
return -EINVAL;
data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
@@ -2698,10 +2680,10 @@ int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
/* waiting for all the tx frames complete might take a while */
for (cnt = 0;
- cnt < trans->trans_cfg->base_params->num_of_queues;
+ cnt < trans->mac_cfg->base->num_of_queues;
cnt++) {
- if (cnt == trans_pcie->txqs.cmd.q_id)
+ if (cnt == trans->conf.cmd_queue)
continue;
if (!test_bit(cnt, trans_pcie->txqs.queue_used))
continue;
@@ -2842,7 +2824,7 @@ static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state;
- if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
return NULL;
state = kmalloc(sizeof(*state), GFP_KERNEL);
@@ -2860,7 +2842,7 @@ static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
*pos = ++state->pos;
- if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
+ if (*pos >= priv->trans->mac_cfg->base->num_of_queues)
return NULL;
return state;
@@ -2892,7 +2874,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
else
seq_puts(seq, "(unallocated)");
- if (state->pos == trans_pcie->txqs.cmd.q_id)
+ if (state->pos == trans->conf.cmd_queue)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
@@ -2930,7 +2912,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
int pos = 0, i, ret;
size_t bufsz;
- bufsz = sizeof(char) * 121 * trans->num_rx_queues;
+ bufsz = sizeof(char) * 121 * trans->info.num_rxqs;
if (!trans_pcie->rxq)
return -EAGAIN;
@@ -2939,9 +2921,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
if (!buf)
return -ENOMEM;
- for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
+ for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+ spin_lock_bh(&rxq->lock);
+
pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
i);
pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
@@ -2962,6 +2946,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: Not Allocated\n");
}
+ spin_unlock_bh(&rxq->lock);
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -3266,8 +3251,9 @@ static ssize_t iwl_dbgfs_reset_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
static const char * const modes[] = {
- [IWL_RESET_MODE_SW_RESET] = "n/a",
- [IWL_RESET_MODE_REPROBE] = "n/a",
+ [IWL_RESET_MODE_SW_RESET] = "sw",
+ [IWL_RESET_MODE_REPROBE] = "reprobe",
+ [IWL_RESET_MODE_TOP_RESET] = "top",
[IWL_RESET_MODE_REMOVE_ONLY] = "remove",
[IWL_RESET_MODE_RESCAN] = "rescan",
[IWL_RESET_MODE_FUNC_RESET] = "function",
@@ -3286,8 +3272,18 @@ static ssize_t iwl_dbgfs_reset_write(struct file *file,
if (mode < 0)
return mode;
- if (mode < IWL_RESET_MODE_REMOVE_ONLY)
- return -EINVAL;
+ if (mode < IWL_RESET_MODE_REMOVE_ONLY) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return -EINVAL;
+ if (mode == IWL_RESET_MODE_TOP_RESET) {
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
+ return -EINVAL;
+ trans->request_top_reset = 1;
+ }
+ iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS);
+ iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS);
+ return count;
+ }
iwl_trans_pcie_reset(trans, mode);
@@ -3428,7 +3424,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
(*data)->len = cpu_to_le32(fh_regs_len);
val = (void *)(*data)->data;
- if (!trans->trans_cfg->gen2)
+ if (!trans->mac_cfg->gen2)
for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
i += sizeof(u32))
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
@@ -3475,7 +3471,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
{
u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
write_ptr = DBGC_CUR_DBGBUF_STATUS;
@@ -3495,7 +3491,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
cpu_to_le32(iwl_read_prph(trans, base));
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
fw_mon_data->fw_mon_base_high_ptr =
cpu_to_le32(iwl_read_prph(trans, base_high));
write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
@@ -3515,8 +3511,8 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
if (trans->dbg.dest_tlv ||
(fw_mon->size &&
- (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
- trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
+ (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
+ trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
@@ -3539,14 +3535,14 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
IWL_LDBG_M2S_BUF_BA_MSK) <<
trans->dbg.dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
- base += trans->cfg->smem_offset;
+ base += trans->mac_cfg->base->smem_offset;
} else {
base = iwl_read_prph(trans, base) <<
trans->dbg.dest_tlv->base_shift;
}
- iwl_trans_read_mem(trans, base, fw_mon_data->data,
- monitor_len / sizeof(u32));
+ iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data,
+ monitor_len / sizeof(u32));
} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
monitor_len =
iwl_trans_pci_dump_marbh_monitor(trans,
@@ -3580,7 +3576,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
trans->dbg.dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
- base += trans->cfg->smem_offset;
+ base += trans->mac_cfg->base->smem_offset;
monitor_len =
(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
@@ -3596,7 +3592,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
trans->dbg.dest_tlv->end_shift;
/* Make "end" point to the actual end */
- if (trans->trans_cfg->device_family >=
+ if (trans->mac_cfg->device_family >=
IWL_DEVICE_FAMILY_8000 ||
trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg.dest_tlv->end_shift);
@@ -3617,13 +3613,13 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->trans_cfg->mq_rx_supported &&
+ !trans->mac_cfg->mq_rx_supported &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
if (!dump_mask)
@@ -3648,7 +3644,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
/* FH registers */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
len += sizeof(*data) +
(iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
@@ -3662,15 +3658,18 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
+ spin_lock_bh(&rxq->lock);
num_rbs = iwl_get_closed_rb_stts(trans, rxq);
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
+ spin_unlock_bh(&rxq->lock);
+
len += num_rbs * (sizeof(*data) +
sizeof(struct iwl_fw_error_dump_rb) +
(PAGE_SIZE << trans_pcie->rx_page_order));
}
/* Paged memory for gen2 HW */
- if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
+ if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
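
A model of the RB-count calculation used when sizing the dump above: the number of pending buffers is the ring distance from the read pointer to the closed pointer, masked to the 256-entry legacy queue.

#include <stdio.h>

#define RX_QUEUE_MASK 255 /* 256-entry legacy RX queue */

int main(void)
{
	unsigned int closed = 10, read = 250;
	unsigned int num_rbs = (closed - read) & RX_QUEUE_MASK;

	printf("pending RBs: %u\n", num_rbs); /* 16: wraps past the end */
	return 0;
}
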
@@ -3695,7 +3694,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
u8 tfdidx;
u32 caplen, cmdlen;
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
tfdidx = idx;
else
tfdidx = ptr;
@@ -3735,7 +3734,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->trans_cfg->gen2 &&
+ if (trans->mac_cfg->gen2 &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
@@ -3775,7 +3774,7 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
if (trans_pcie->msix_enabled) {
inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
else
sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
@@ -3787,12 +3786,14 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct iwl_cfg_trans_params *cfg_trans)
+struct iwl_trans *
+iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct iwl_mac_cfg *mac_cfg,
+ struct iwl_trans_info *info)
{
struct iwl_trans_pcie *trans_pcie, **priv;
struct iwl_trans *trans;
+ unsigned int bc_tbl_n_entries;
int ret, addr_size;
u32 bar0;
@@ -3809,13 +3810,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
return ERR_PTR(ret);
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
- cfg_trans);
+ mac_cfg);
if (!trans)
return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans->trans_cfg->gen2) {
+ /* Initialize the wait queue for commands */
+ init_waitqueue_head(&trans_pcie->wait_command_queue);
+
+ if (trans->mac_cfg->gen2) {
trans_pcie->txqs.tfd.addr_size = 64;
trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
@@ -3824,10 +3828,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
}
- trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
- /* Set a short watchdog for the command queue */
- trans_pcie->txqs.cmd.wdg_timeout = IWL_DEF_WD_TIMEOUT;
+ trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11);
+
+ info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->txqs.tso_hdr_page) {
@@ -3835,20 +3841,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_free_trans;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- trans_pcie->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans_pcie->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ;
+ else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;
else
- trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE;
+
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;
/*
* For gen2 devices, we use a single allocation for each byte-count
* table, but they're pretty small (1k) so use a DMA pool that we
* allocate here.
*/
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
trans_pcie->txqs.bc_pool =
dmam_pool_create("iwlwifi:bc", trans->dev,
trans_pcie->txqs.bc_tbl_size,
@@ -3861,7 +3868,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Some things must not change even if the config does */
WARN_ON(trans_pcie->txqs.tfd.addr_size !=
- (trans->trans_cfg->gen2 ? 64 : 36));
+ (trans->mac_cfg->gen2 ? 64 : 36));
/* Initialize NAPI here - it should be before registering to mac80211
* in the opmode but after the HW struct is allocated.
@@ -3895,7 +3902,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->debug_rfkill = -1;
- if (!cfg_trans->base_params->pcie_l1_allowed) {
+ if (!mac_cfg->base->pcie_l1_allowed) {
/*
* W/A - seems to solve weird behavior. We need to remove this
* if we don't want to stay in L1 all the time. This wastes a
@@ -3939,8 +3946,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);
- trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
- if (trans->hw_rev == 0xffffffff) {
+ info->hw_rev = iwl_read32(trans, CSR_HW_REV);
+ if (info->hw_rev == 0xffffffff) {
dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
ret = -EIO;
goto out_no_pci;
@@ -3952,17 +3959,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* "dash" value). To keep hw_rev backwards compatible - we'll store it
* in the old format.
*/
- if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
- trans->hw_rev_step = trans->hw_rev & 0xF;
+ if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ info->hw_rev_step = info->hw_rev & 0xF;
else
- trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;
+ info->hw_rev_step = (info->hw_rev & 0xC) >> 2;
- IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
+ IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev);
- iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
- trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
- snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
- "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+ iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info);
init_waitqueue_head(&trans_pcie->sx_waitq);
@@ -3971,7 +3975,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
if (trans_pcie->msix_enabled) {
- ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
+ ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info);
if (ret)
goto out_no_pci;
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 71227fd3dac0..df0545f09da9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -18,13 +18,12 @@
static struct page *get_workaround_page(struct iwl_trans *trans,
struct sk_buff *skb)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_page_info *info;
struct page **page_ptr;
struct page *ret;
dma_addr_t phys;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
ret = alloc_page(GFP_ATOMIC);
if (!ret)
@@ -164,7 +163,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v9 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -491,21 +490,21 @@ struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
bool amsdu;
/* There must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v9) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
+ offsetofend(struct iwl_tx_cmd_v9, dram_info) >
IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
+ offsetofend(struct iwl_tx_cmd, dram_info) >
IWL_FIRST_TB_SIZE);
memset(tfd, 0, sizeof(*tfd));
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = sizeof(struct iwl_tx_cmd_gen2);
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = sizeof(struct iwl_tx_cmd_v9);
else
- len = sizeof(struct iwl_tx_cmd_gen3);
+ len = sizeof(struct iwl_tx_cmd);
amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
(*ieee80211_get_qos_ctl(hdr) &
@@ -536,17 +535,17 @@ int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
* If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
- if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
+ if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size)
max = q->n_window;
else
- max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
+ max = trans->mac_cfg->base->max_tfd_queue_size - 1;
/*
* max_tfd_queue_size is a power of 2, so the following is equivalent to
* modulo by max_tfd_queue_size and is well defined.
*/
used = (q->write_ptr - q->read_ptr) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+ (trans->mac_cfg->base->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
@@ -561,8 +560,8 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
@@ -582,24 +581,16 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
- /* Starting from AX210, the HW expects bytes */
- WARN_ON(trans_pcie->txqs.bc_table_dword);
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
} else {
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-
- /* Before AX210, the HW expects DW */
- WARN_ON(!trans_pcie->txqs.bc_table_dword);
len = DIV_ROUND_UP(len, 4);
WARN_ON(len > 0xFFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
+
+ scd_bc_tbl[idx].tfd_offset = bc_ent;
}
static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
@@ -756,7 +747,8 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -785,16 +777,16 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -1;
}
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd *tx_cmd =
(void *)dev_cmd->payload;
- cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ cmd_len = le16_to_cpu(tx_cmd->len);
} else {
- struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ struct iwl_tx_cmd_v9 *tx_cmd_v9 =
(void *)dev_cmd->payload;
- cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ cmd_len = le16_to_cpu(tx_cmd_v9->len);
}
/* Set up entry for this TFD in Tx byte-count array */
@@ -832,7 +824,7 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ if (txq_id != trans->conf.cmd_queue) {
int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
struct sk_buff *skb = txq->entries[idx].skb;
@@ -906,7 +898,7 @@ static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
iwl_txq_gen2_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->txqs.cmd.q_id)
+ if (txq_id == trans->conf.cmd_queue)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -1007,7 +999,7 @@ static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txqs.txq[qid] = txq;
- wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+ wr_ptr &= (trans->mac_cfg->base->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
@@ -1043,8 +1035,8 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
/* but must be power of 2 values for calculating read/write pointers */
size = rounddown_pow_of_two(size);
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
- trans->hw_rev_step == SILICON_A_STEP) {
+ if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->info.hw_rev_step == SILICON_A_STEP) {
size = 4096;
txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
} else {
@@ -1064,7 +1056,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
if (IS_ERR(txq))
return PTR_ERR(txq);
- if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) {
+ if (trans->conf.queue_alloc_cmd_ver == 0) {
memset(&cmd.old, 0, sizeof(cmd.old));
cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
@@ -1081,7 +1073,7 @@ int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
hcmd.id = SCD_QUEUE_CFG;
hcmd.len[0] = sizeof(cmd.old);
hcmd.data[0] = &cmd.old;
- } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) {
+ } else if (trans->conf.queue_alloc_cmd_ver == 3) {
memset(&cmd.new, 0, sizeof(cmd.new));
cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
@@ -1176,7 +1168,7 @@ int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
}
ret = iwl_txq_init(trans, queue, queue_size,
- (txq_id == trans_pcie->txqs.cmd.q_id));
+ (txq_id == trans->conf.cmd_queue));
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
@@ -1206,7 +1198,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1323,7 +1315,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1371,7 +1363,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 9fc4e98693eb..7abd7c7daa89 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -78,7 +78,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
@@ -90,8 +89,8 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 2. NIC is woken up for CMD regardless of shadow outside this function
* 3. there is a chance that the NIC is asleep
*/
- if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans_pcie->txqs.cmd.q_id &&
+ if (!trans->mac_cfg->base->shadow_reg_enable &&
+ txq_id != trans->conf.cmd_queue &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
@@ -125,7 +124,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txqs.txq[i];
if (!test_bit(i, trans_pcie->txqs.queue_used))
@@ -193,7 +192,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
+ if (!trans->mac_cfg->base->apmg_wake_up_wa)
return;
spin_lock(&trans_pcie->reg_lock);
@@ -226,11 +225,10 @@ static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page **page_ptr;
struct page *next;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
next = *page_ptr;
*page_ptr = NULL;
@@ -280,10 +278,12 @@ iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
struct iwl_tfd *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
tfd->num_tbs = 0;
- iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
- trans->invalid_tx_cmd.size);
+ iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans_pcie->invalid_tx_cmd.dma,
+ trans_pcie->invalid_tx_cmd.size);
}
static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
@@ -355,7 +355,7 @@ static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
iwl_txq_get_tfd(trans, txq, read_ptr));
else
@@ -394,7 +394,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ if (txq_id != trans->conf.cmd_queue) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
struct iwl_cmd_meta *cmd_meta =
&txq->entries[txq->read_ptr].meta;
@@ -408,7 +408,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr &&
- txq_id == trans_pcie->txqs.cmd.q_id)
+ txq_id == trans->conf.cmd_queue)
iwl_pcie_clear_cmd_in_flight(trans);
}
@@ -446,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->txqs.cmd.q_id)
+ if (txq_id == trans->conf.cmd_queue)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -456,7 +456,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
if (txq->tfds) {
dma_free_coherent(dev,
trans_pcie->txqs.tfd.size *
- trans->trans_cfg->base_params->max_tfd_queue_size,
+ trans->mac_cfg->base->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -475,10 +475,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
memset(txq, 0, sizeof(*txq));
}
-void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
+void iwl_pcie_tx_start(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int nq = trans->trans_cfg->base_params->num_of_queues;
+ int nq = trans->mac_cfg->base->num_of_queues;
int chan;
u32 reg_val;
int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
@@ -493,13 +493,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
- WARN_ON(scd_base_addr != 0 &&
- scd_base_addr != trans_pcie->scd_base_addr);
-
/* reset context data, TX status and translation data */
- iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_MEM_LOWER_BOUND,
- NULL, clear_dwords);
+ iwl_trans_pcie_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->txqs.scd_bc_tbls.dma >> 10);
@@ -507,12 +504,12 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- if (trans->trans_cfg->base_params->scd_chain_ext_wa)
+ if (trans->mac_cfg->base->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
- trans_pcie->txqs.cmd.fifo,
- trans_pcie->txqs.cmd.wdg_timeout);
+ iwl_trans_ac_txq_enable(trans, trans->conf.cmd_queue,
+ trans->conf.cmd_fifo,
+ IWL_DEF_WD_TIMEOUT);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -529,7 +526,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000)
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
@@ -543,13 +540,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
* we should never get here in gen2 trans mode; return early to avoid
* having invalid accesses
*/
- if (WARN_ON_ONCE(trans->trans_cfg->gen2))
+ if (WARN_ON_ONCE(trans->mac_cfg->gen2))
return;
- for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++) {
struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
txq->dma_addr);
@@ -571,7 +568,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
* while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
* contain garbage.
*/
- iwl_pcie_tx_start(trans, 0);
+ iwl_pcie_tx_start(trans);
}
static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
@@ -633,7 +630,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
return 0;
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++)
iwl_pcie_txq_unmap(trans, txq_id);
@@ -656,7 +653,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
/* Tx queues */
if (trans_pcie->txq_memory) {
for (txq_id = 0;
- txq_id < trans->trans_cfg->base_params->num_of_queues;
+ txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
trans_pcie->txqs.txq[txq_id] = NULL;
@@ -678,7 +675,7 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
bool active;
u8 fifo;
- if (trans->trans_cfg->gen2) {
+ if (trans->mac_cfg->gen2) {
IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
txq->read_ptr, txq->write_ptr);
/* TODO: access new SCD registers and dump them */
@@ -695,9 +692,9 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ (trans->mac_cfg->base->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ (trans->mac_cfg->base->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
@@ -723,8 +720,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t num_entries = trans->trans_cfg->gen2 ?
- slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t num_entries = trans->mac_cfg->gen2 ?
+ slots_num : trans->mac_cfg->base->max_tfd_queue_size;
size_t tfd_sz;
size_t tb0_buf_sz;
int i;
@@ -779,7 +776,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
for (i = 0; i < num_entries; i++) {
void *tfd = iwl_txq_get_tfd(trans, txq, i);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_txq_set_tfd_invalid_gen2(trans, tfd);
else
iwl_txq_set_tfd_invalid_gen1(trans, tfd);
@@ -799,6 +796,8 @@ error:
return -ENOMEM;
}
+#define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+
/*
* iwl_pcie_tx_alloc - allocate TX context
* Allocate all Tx DMA structures and initialize them
@@ -808,12 +807,12 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
+ u16 bc_tbls_size = trans->mac_cfg->base->num_of_queues;
- if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
+ if (WARN_ON(trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
return -EINVAL;
- bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
+ bc_tbls_size *= BC_TABLE_SIZE;
/* It is not allowed to alloc twice, so warn when this happens.
* We cannot rely on the previous allocation, so free and fail */
@@ -837,7 +836,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
trans_pcie->txq_memory =
- kcalloc(trans->trans_cfg->base_params->num_of_queues,
+ kcalloc(trans->mac_cfg->base->num_of_queues,
sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq_memory) {
IWL_ERR(trans, "Not enough memory for txq\n");
@@ -846,16 +845,16 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans->conf.cmd_queue);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->cfg->min_txq_size);
+ trans->mac_cfg->base->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_ba_txq_size);
+ trans->mac_cfg->base->min_ba_txq_size);
trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
slots_num, cmd_queue);
@@ -905,7 +904,7 @@ int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
u32 tfd_queue_max_size =
- trans->trans_cfg->base_params->max_tfd_queue_size;
+ trans->mac_cfg->base->max_tfd_queue_size;
int ret;
txq->need_update = false;
@@ -963,16 +962,16 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
spin_unlock_bh(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->mac_cfg->base->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans->conf.cmd_queue);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
- trans->cfg->min_txq_size);
+ trans->mac_cfg->base->min_txq_size);
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
- trans->cfg->min_ba_txq_size);
+ trans->mac_cfg->base->min_ba_txq_size);
ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
@@ -991,7 +990,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
- if (trans->trans_cfg->base_params->num_of_queues > 20)
+ if (trans->mac_cfg->base->num_of_queues > 20)
iwl_set_bits_prph(trans, SCD_GP_CTRL,
SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1012,7 +1011,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
+ if (!trans->mac_cfg->base->apmg_wake_up_wa)
return 0;
/*
@@ -1090,12 +1089,12 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
idx = iwl_txq_get_cmd_index(txq, idx);
r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
+ if (idx >= trans->mac_cfg->base->max_tfd_queue_size ||
(!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
- trans->trans_cfg->base_params->max_tfd_queue_size,
+ trans->mac_cfg->base->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
@@ -1164,15 +1163,15 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;
/* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans_pcie->txqs.cmd.q_id &&
- trans_pcie->scd_set_active)
+ if (txq_id == trans->conf.cmd_queue &&
+ trans->conf.scd_set_active)
iwl_scd_enable_set_active(trans, 0);
/* Stop this Tx queue before configuring it */
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans_pcie->txqs.cmd.q_id)
+ if (txq_id != trans->conf.cmd_queue)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
@@ -1206,7 +1205,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
* this sad hardware issue.
* This bug has been fixed on devices 9000 and up.
*/
- scd_bug = !trans->trans_cfg->mq_rx_supported &&
+ scd_bug = !trans->mac_cfg->mq_rx_supported &&
!((ssn - txq->write_ptr) & 0x3f) &&
(ssn != txq->write_ptr);
if (scd_bug)
@@ -1242,8 +1241,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans_pcie->txqs.cmd.q_id &&
- trans_pcie->scd_set_active)
+ if (txq_id == trans->conf.cmd_queue &&
+ trans->conf.scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
IWL_DEBUG_TX_QUEUES(trans,
@@ -1293,8 +1292,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
- iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
- ARRAY_SIZE(zero_val));
+ iwl_trans_pcie_write_mem(trans, stts_addr,
+ (const void *)zero_val,
+ ARRAY_SIZE(zero_val));
}
iwl_pcie_txq_unmap(trans, txq_id);
@@ -1310,10 +1310,10 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txqs.txq[i];
- if (i == trans_pcie->txqs.cmd.q_id)
+ if (i == trans->conf.cmd_queue)
continue;
/* we skip the command queue (obviously) so it's OK to nest */
@@ -1346,7 +1346,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1361,7 +1361,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
unsigned long flags;
- if (WARN(!trans->wide_cmd_header &&
+ if (WARN(!trans->conf.wide_cmd_header &&
group_id > IWL_ALWAYS_LONG_GROUP,
"unsupported wide command %#x\n", cmd->id))
return -EINVAL;
@@ -1475,7 +1475,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1483,7 +1483,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
@@ -1534,7 +1534,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1633,14 +1633,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
+ if (WARN(txq_id != trans->conf.cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
+ txq_id, trans->conf.cmd_queue, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1654,7 +1654,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
group_id = cmd->hdr.group_id;
cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
iwl_txq_gen2_tfd_unmap(trans, meta,
iwl_txq_get_tfd(trans, txq, index));
else
@@ -1683,7 +1683,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
iwl_get_cmd_string(trans, cmd_id));
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
}
meta->flags = 0;
@@ -1753,7 +1753,7 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
dma_addr_t phys;
void *ret;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);
if (WARN_ON(*page_ptr))
return NULL;
@@ -1912,7 +1912,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 tb1_len)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -2067,14 +2067,14 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
int num_tbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ struct iwl_bc_tbl_entry *scd_bc_tbl;
int write_ptr = txq->write_ptr;
int txq_id = txq->id;
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
@@ -2092,7 +2092,8 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
- if (trans_pcie->txqs.bc_table_dword)
+
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
@@ -2100,10 +2101,10 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
bc_ent = cpu_to_le16(len | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent;
if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
bc_ent;
}
@@ -2112,7 +2113,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
- struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq;
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
@@ -2153,7 +2154,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -2184,7 +2186,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
+ offsetof(struct iwl_tx_cmd_v6, scratch);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -2199,7 +2201,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* (This calculation modifies the TX command, so do it before the
* setup of the first TB)
*/
- len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+ len = sizeof(struct iwl_tx_cmd_v6) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_FIRST_TB_SIZE;
/* do not align A-MSDU to dword as the subframe header aligns it */
amsdu = ieee80211_is_data_qos(fc) &&
@@ -2222,9 +2224,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
IWL_FIRST_TB_SIZE, true);
/* there must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd, scratch) >
+ offsetofend(struct iwl_tx_cmd_v6, scratch) >
IWL_FIRST_TB_SIZE);
/* map the data for TB1 */
@@ -2312,24 +2314,24 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+ struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
int txq_id = txq->id;
u8 sta_id = 0;
__le16 bc_ent;
struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != trans_pcie->txqs.cmd.q_id)
+ if (txq_id != trans->conf.cmd_queue)
sta_id = tx_cmd->sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+ scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
bc_ent;
}
@@ -2343,7 +2345,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
int txq_read_ptr, txq_write_ptr;
/* This function is not meant to release the cmd queue */
- if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
+ if (WARN_ON(txq_id == trans->conf.cmd_queue))
return;
if (WARN_ON(!txq))
@@ -2385,7 +2387,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
IWL_ERR(trans,
"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free,
- trans->trans_cfg->base_params->max_tfd_queue_size,
+ trans->mac_cfg->base->max_tfd_queue_size,
txq_write_ptr, txq_read_ptr);
iwl_op_mode_time_point(trans->op_mode,
@@ -2414,7 +2416,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
txq->entries[read_ptr].skb = NULL;
- if (!trans->trans_cfg->gen2)
+ if (!trans->mac_cfg->gen2)
iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
txq_read_ptr);
@@ -2456,7 +2458,8 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans_pcie->txqs.dev_cmd_offs);
+ trans->conf.cb_data_offs +
+ sizeof(void *));
/*
* Note that we can very well be overflowing again.
@@ -2548,11 +2551,11 @@ next_queue:
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
+ struct iwl_host_cmd *cmd,
+ const char *cmd_str)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
int cmd_idx;
int ret;
@@ -2565,7 +2568,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
else
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
@@ -2578,7 +2581,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
return ret;
}
- ret = wait_event_timeout(trans->wait_command_queue,
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
!test_bit(STATUS_SYNC_HCMD_ACTIVE,
&trans->status),
HOST_COMPLETE_TIMEOUT);
@@ -2594,7 +2597,7 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
cmd_str);
ret = -ETIMEDOUT;
- iwl_trans_sync_nmi(trans);
+ iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
@@ -2645,6 +2648,8 @@ cancel:
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+
/* Make sure the NIC is still alive in the bus */
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
@@ -2656,20 +2661,16 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
return -ERFKILL;
}
- if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
- !(cmd->flags & CMD_SEND_IN_D3))) {
- IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
- return -EHOSTDOWN;
- }
-
if (cmd->flags & CMD_ASYNC) {
int ret;
+ IWL_DEBUG_INFO(trans, "Sending async command %s\n", cmd_str);
+
/* An asynchronous command can not expect an SKB to be set. */
if (WARN_ON(cmd->flags & CMD_WANT_SKB))
return -EINVAL;
- if (trans->trans_cfg->gen2)
+ if (trans->mac_cfg->gen2)
ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
else
ret = iwl_pcie_enqueue_hcmd(trans, cmd);
@@ -2683,6 +2684,5 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
return 0;
}
- return iwl_trans_pcie_send_hcmd_sync(trans, cmd);
+ return iwl_trans_pcie_send_hcmd_sync(trans, cmd, cmd_str);
}
-IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
index d0bda23c628a..784433bb246a 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for the iwlwifi device info table
*
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
*/
#include <kunit/test.h>
#include <linux/pci.h>
@@ -13,10 +13,50 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
{
- printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
- pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
- di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
- di->cores);
+ u16 subdevice_mask = GENMASK(di->subdevice_m_h, di->subdevice_m_l);
+ char buf[100] = {};
+ int pos = 0;
+
+ if (di->match_rf_type)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_type=%03x", di->rf_type);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_type=*");
+
+ if (di->match_bw_limit)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " bw_limit=%d", di->bw_limit);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " bw_limit=*");
+
+ if (di->match_rf_step)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_step=%c",
+ di->rf_step == SILICON_Z_STEP ? 'Z' :
+ 'A' + di->rf_step);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_step=*");
+
+ if (di->match_rf_id)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_id=0x%x", di->rf_id);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " rf_id=*");
+
+ if (di->match_cdb)
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " cdb=%d", di->cdb);
+ else
+ pos += scnprintf(buf + pos, sizeof(buf) - pos,
+ " cdb=*");
+
+ printk(KERN_DEBUG "%sdev=%04x subdev=%04x/%04x%s\n",
+ pfx, di->device, di->subdevice, subdevice_mask, buf);
}
static void devinfo_table_order(struct kunit *test)
@@ -28,11 +68,14 @@ static void devinfo_table_order(struct kunit *test)
const struct iwl_dev_info *ret;
ret = iwl_pci_find_dev_info(di->device, di->subdevice,
- di->mac_type, di->mac_step,
di->rf_type, di->cdb,
- di->jacket, di->rf_id,
- di->no_160, di->cores, di->rf_step);
- if (ret != di) {
+ di->rf_id, di->bw_limit,
+ di->rf_step);
+ if (!ret) {
+ iwl_pci_print_dev_info("No entry found for: ", di);
+ KUNIT_FAIL(test,
+ "No entry found for entry at index %d\n", idx);
+ } else if (ret != di) {
iwl_pci_print_dev_info("searched: ", di);
iwl_pci_print_dev_info("found: ", ret);
KUNIT_FAIL(test,
@@ -42,6 +85,92 @@ static void devinfo_table_order(struct kunit *test)
}
}
+static void devinfo_names(struct kunit *test)
+{
+ int idx;
+
+ for (idx = 0; idx < iwl_dev_info_table_size; idx++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[idx];
+
+ KUNIT_ASSERT_TRUE(test, di->name);
+ }
+}
+
+static void devinfo_no_cfg_dups(struct kunit *test)
+{
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ const struct iwl_rf_cfg *cfg_i = iwl_dev_info_table[i].cfg;
+
+ for (int j = 0; j < i; j++) {
+ const struct iwl_rf_cfg *cfg_j = iwl_dev_info_table[j].cfg;
+
+ if (cfg_i == cfg_j)
+ continue;
+
+ KUNIT_EXPECT_NE_MSG(test, memcmp(cfg_i, cfg_j,
+ sizeof(*cfg_i)), 0,
+ "identical configs: %ps and %ps\n",
+ cfg_i, cfg_j);
+ }
+ }
+}
+
+static void devinfo_no_name_dups(struct kunit *test)
+{
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ for (int j = 0; j < i; j++) {
+ if (iwl_dev_info_table[i].name == iwl_dev_info_table[j].name)
+ continue;
+
+ KUNIT_EXPECT_NE_MSG(test,
+ strcmp(iwl_dev_info_table[i].name,
+ iwl_dev_info_table[j].name),
+ 0,
+ "name dup: %ps/%ps",
+ iwl_dev_info_table[i].name,
+ iwl_dev_info_table[j].name);
+ }
+ }
+}
+
+static void devinfo_check_subdev_match(struct kunit *test)
+{
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[i];
+ u16 subdevice_mask = GENMASK(di->subdevice_m_h,
+ di->subdevice_m_l);
+
+ /* if the BW limit bit is matched, there must be a limit */
+ if (di->match_bw_limit == 1 && di->bw_limit == 1)
+ KUNIT_EXPECT_NE(test, di->cfg->bw_limit, 0);
+
+ /* if subdevice is ANY we can have RF ID/BW limit */
+ if (di->subdevice == (u16)IWL_CFG_ANY)
+ continue;
+
+ /* same if the subdevice mask doesn't overlap them */
+ if (IWL_SUBDEVICE_RF_ID(subdevice_mask) == 0 &&
+ IWL_SUBDEVICE_BW_LIM(subdevice_mask) == 0)
+ continue;
+
+ /* but otherwise they shouldn't be used */
+ KUNIT_EXPECT_EQ(test, (int)di->match_rf_id, 0);
+ KUNIT_EXPECT_EQ(test, (int)di->match_bw_limit, 0);
+ }
+}
+
+static void devinfo_check_killer_subdev(struct kunit *test)
+{
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[i];
+
+ if (!strstr(di->name, "Killer"))
+ continue;
+
+ KUNIT_EXPECT_NE(test, di->subdevice, (u16)IWL_CFG_ANY);
+ }
+}
+
static void devinfo_pci_ids(struct kunit *test)
{
struct pci_dev *dev;
@@ -64,9 +193,36 @@ static void devinfo_pci_ids(struct kunit *test)
}
}
+static void devinfo_no_mac_cfg_dups(struct kunit *test)
+{
+ for (int i = 0; iwl_hw_card_ids[i].vendor; i++) {
+ const struct iwl_mac_cfg *cfg_i =
+ (void *)iwl_hw_card_ids[i].driver_data;
+
+ for (int j = 0; j < i; j++) {
+ const struct iwl_mac_cfg *cfg_j =
+ (void *)iwl_hw_card_ids[j].driver_data;
+
+ if (cfg_i == cfg_j)
+ continue;
+
+ KUNIT_EXPECT_NE_MSG(test, memcmp(cfg_j, cfg_i,
+ sizeof(*cfg_i)), 0,
+ "identical configs: %ps and %ps\n",
+ cfg_i, cfg_j);
+ }
+ }
+}
+
static struct kunit_case devinfo_test_cases[] = {
KUNIT_CASE(devinfo_table_order),
+ KUNIT_CASE(devinfo_names),
+ KUNIT_CASE(devinfo_no_cfg_dups),
+ KUNIT_CASE(devinfo_no_name_dups),
+ KUNIT_CASE(devinfo_check_subdev_match),
+ KUNIT_CASE(devinfo_check_killer_subdev),
KUNIT_CASE(devinfo_pci_ids),
+ KUNIT_CASE(devinfo_no_mac_cfg_dups),
{}
};
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index 772084a9bd8d..3baf8ab01e22 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -231,6 +231,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
mutex_lock(&priv->eeprom_mutex);
priv->eeprom = buf;
+ priv->eeprom_slice_size = len;
eeprom_hdr = skb_put(skb, eeprom_hdr_size + len);
if (priv->fw_var < 0x509) {
@@ -253,6 +254,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
ret = -EBUSY;
}
priv->eeprom = NULL;
+ priv->eeprom_slice_size = 0;
mutex_unlock(&priv->eeprom_mutex);
return ret;
}
diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
index 522656de4159..aeb5e40cc5ef 100644
--- a/drivers/net/wireless/intersil/p54/p54.h
+++ b/drivers/net/wireless/intersil/p54/p54.h
@@ -258,6 +258,7 @@ struct p54_common {
/* eeprom handling */
void *eeprom;
+ size_t eeprom_slice_size;
struct completion eeprom_comp;
struct mutex eeprom_mutex;
};
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 8414aa208655..2deb1bb54f24 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -496,14 +496,19 @@ static void p54_rx_eeprom_readback(struct p54_common *priv,
return;
if (priv->fw_var >= 0x509) {
- memcpy(priv->eeprom, eeprom->v2.data,
- le16_to_cpu(eeprom->v2.len));
+ if (le16_to_cpu(eeprom->v2.len) != priv->eeprom_slice_size)
+ return;
+
+ memcpy(priv->eeprom, eeprom->v2.data, priv->eeprom_slice_size);
} else {
- memcpy(priv->eeprom, eeprom->v1.data,
- le16_to_cpu(eeprom->v1.len));
+ if (le16_to_cpu(eeprom->v1.len) != priv->eeprom_slice_size)
+ return;
+
+ memcpy(priv->eeprom, eeprom->v1.data, priv->eeprom_slice_size);
}
priv->eeprom = NULL;
+ priv->eeprom_slice_size = 0;
tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
dev_kfree_skb_any(tmp);
complete(&priv->eeprom_comp);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index a099fdaafa45..60c12328c2f3 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1126,7 +1126,7 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
+ if (mwifiex_sta_init_cmd(priv, false))
return -1;
return 0;
@@ -1167,7 +1167,7 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
+ if (mwifiex_sta_init_cmd(priv, false))
return -1;
return 0;
@@ -1204,7 +1204,7 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
+ if (mwifiex_sta_init_cmd(priv, false))
return -1;
return 0;
@@ -2906,16 +2906,12 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
struct mwifiex_private *priv)
{
int rx_mcs_supp;
- struct ieee80211_mcs_info mcs_set;
- u8 *mcs = (u8 *)&mcs_set;
struct mwifiex_adapter *adapter = priv->adapter;
ht_info->ht_supported = true;
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
/* Fill HT capability information */
if (ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap))
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -2961,17 +2957,15 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
rx_mcs_supp = GET_RXMCSSUPP(adapter->user_dev_mcs_support);
+
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
/* Set MCS for 1x1/2x2 */
- memset(mcs, 0xff, rx_mcs_supp);
- /* Clear all the other values */
- memset(&mcs[rx_mcs_supp], 0,
- sizeof(struct ieee80211_mcs_info) - rx_mcs_supp);
+ memset(ht_info->mcs.rx_mask, 0xff, rx_mcs_supp);
+
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap))
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
- SETHT_MCS32(mcs_set.rx_mask);
-
- memcpy((u8 *) &ht_info->mcs, mcs, sizeof(struct ieee80211_mcs_info));
+ SETHT_MCS32(ht_info->mcs.rx_mask);
ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
}
@@ -3013,7 +3007,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EFAULT);
}
- priv->wdev.wiphy = wiphy;
priv->wdev.iftype = NL80211_IFTYPE_STATION;
if (type == NL80211_IFTYPE_UNSPECIFIED)
@@ -3022,8 +3015,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_mode = type;
priv->bss_type = MWIFIEX_BSS_TYPE_STA;
- priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
break;
@@ -3043,14 +3034,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EFAULT);
}
- priv->wdev.wiphy = wiphy;
priv->wdev.iftype = NL80211_IFTYPE_AP;
priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
- priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
- priv->bss_started = 0;
priv->bss_mode = type;
break;
@@ -3070,7 +3057,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EFAULT);
}
- priv->wdev.wiphy = wiphy;
/* At start-up, wpa_supplicant tries to change the interface
* to NL80211_IFTYPE_STATION if it is not in managed mode.
*/
@@ -3083,10 +3069,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
*/
priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
- priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
- priv->bss_started = 0;
if (mwifiex_cfg80211_init_p2p_client(priv)) {
memset(&priv->wdev, 0, sizeof(priv->wdev));
@@ -3100,6 +3083,11 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
return ERR_PTR(-EINVAL);
}
+ priv->wdev.wiphy = wiphy;
+ priv->bss_priority = 0;
+ priv->bss_started = 0;
+ priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
+
dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
name_assign_type, ether_setup,
IEEE80211_NUM_ACS, 1);
@@ -3122,7 +3110,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
if (ret)
goto err_set_bss_mode;
- ret = mwifiex_sta_init_cmd(priv, false, false);
+ ret = mwifiex_sta_init_cmd(priv, false);
if (ret)
goto err_sta_init;
}
@@ -4703,7 +4691,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
void *wdev_priv;
struct wiphy *wiphy;
struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
- u8 *country_code;
+ const u8 *country_code;
u32 thr, retry;
struct cfg80211_ops *ops;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index d7fd79214bcf..686bf12b6c26 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -153,7 +153,7 @@ struct region_code_mapping {
u8 region[IEEE80211_COUNTRY_STRING_LEN] __nonstring;
};
-static struct region_code_mapping region_code_mapping_t[] = {
+static const struct region_code_mapping region_code_mapping_t[] = {
{ 0x10, "US " }, /* US FCC */
{ 0x20, "CA " }, /* IC Canada */
{ 0x30, "FR " }, /* France */
@@ -165,7 +165,7 @@ static struct region_code_mapping region_code_mapping_t[] = {
};
/* This function converts integer code to region string */
-u8 *mwifiex_11d_code_2_region(u8 code)
+const u8 *mwifiex_11d_code_2_region(u8 code)
{
u8 i;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 5573e2ded72f..3bf27efe4537 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -159,11 +159,9 @@ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
* sending. Afterwards, it logs the command ID and action for debugging
* and sets up the command timeout timer.
*/
-static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
+static int mwifiex_dnld_cmd_to_fw(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
-
- struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct host_cmd_ds_command *host_cmd;
uint16_t cmd_code;
@@ -367,8 +365,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
(test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
- mwifiex_hs_activated_event(mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY), true);
+ mwifiex_hs_activated_event(adapter, true);
}
return ret;
@@ -473,8 +470,7 @@ void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
int mwifiex_process_event(struct mwifiex_adapter *adapter)
{
int ret, i;
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ struct mwifiex_private *priv;
struct sk_buff *skb = adapter->event_skb;
u32 eventcause;
struct mwifiex_rxinfo *rx_info;
@@ -744,7 +740,6 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
*/
int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
{
- struct mwifiex_private *priv;
struct cmd_ctrl_node *cmd_node;
int ret = 0;
struct host_cmd_ds_command *host_cmd;
@@ -768,7 +763,6 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
struct cmd_ctrl_node, list);
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
- priv = cmd_node->priv;
if (adapter->ps_state != PS_STATE_AWAKE) {
mwifiex_dbg(adapter, ERROR,
@@ -783,18 +777,17 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
spin_unlock_bh(&adapter->cmd_pending_q_lock);
spin_unlock_bh(&adapter->mwifiex_cmd_lock);
- ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node);
- priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ ret = mwifiex_dnld_cmd_to_fw(adapter, cmd_node);
+
/* Any command sent to the firmware when host is in sleep
* mode should de-configure host sleep. We should skip the
* host sleep configuration command itself though
*/
- if (priv && (host_cmd->command !=
- cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
+ if (host_cmd->command != cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH)) {
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
&adapter->work_flags);
- mwifiex_hs_activated_event(priv, false);
+ mwifiex_hs_activated_event(adapter, false);
}
}
@@ -810,8 +803,7 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
{
struct host_cmd_ds_command *resp;
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ struct mwifiex_private *priv;
int ret = 0;
uint16_t orig_cmdresp_no;
uint16_t cmdresp_no;
@@ -900,18 +892,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
ret = mwifiex_process_sta_cmdresp(priv, cmdresp_no, resp);
}
- /* Check init command response */
- if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
- if (ret) {
- mwifiex_dbg(adapter, ERROR,
- "%s: cmd %#x failed during\t"
- "initialization\n", __func__, cmdresp_no);
- mwifiex_init_fw_complete(adapter);
- return -1;
- } else if (adapter->last_init_cmd == cmdresp_no)
- adapter->hw_status = MWIFIEX_HW_STATUS_INIT_DONE;
- }
-
if (adapter->curr_cmd) {
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = ret;
@@ -1030,10 +1010,6 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
mwifiex_cancel_pending_ioctl(adapter);
}
}
- if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
- mwifiex_init_fw_complete(adapter);
- return;
- }
if (adapter->if_ops.device_dump)
adapter->if_ops.device_dump(adapter);
@@ -1160,27 +1136,27 @@ mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
* This event is generated by the driver, with a blank event body.
*/
void
-mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
+mwifiex_hs_activated_event(struct mwifiex_adapter *adapter, u8 activated)
{
if (activated) {
if (test_bit(MWIFIEX_IS_HS_CONFIGURED,
- &priv->adapter->work_flags)) {
- priv->adapter->hs_activated = true;
- mwifiex_update_rxreor_flags(priv->adapter,
+ &adapter->work_flags)) {
+ adapter->hs_activated = true;
+ mwifiex_update_rxreor_flags(adapter,
RXREOR_FORCE_NO_DROP);
- mwifiex_dbg(priv->adapter, EVENT,
+ mwifiex_dbg(adapter, EVENT,
"event: hs_activated\n");
- priv->adapter->hs_activate_wait_q_woken = true;
+ adapter->hs_activate_wait_q_woken = true;
wake_up_interruptible(
- &priv->adapter->hs_activate_wait_q);
+ &adapter->hs_activate_wait_q);
} else {
- mwifiex_dbg(priv->adapter, EVENT,
+ mwifiex_dbg(adapter, EVENT,
"event: HS not configured\n");
}
} else {
- mwifiex_dbg(priv->adapter, EVENT,
+ mwifiex_dbg(adapter, EVENT,
"event: hs_deactivated\n");
- priv->adapter->hs_activated = false;
+ adapter->hs_activated = false;
}
}
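The mwifiex_hs_activated_event() conversion above is part of a recurring change in this series: callers used to fetch an arbitrary mwifiex_private via mwifiex_get_priv() only so the callee could climb back to priv->adapter. Passing the adapter directly removes the round-trip and the NULL-priv corner case. A compilable sketch of the refactored shape, with illustrative types:

#include <stdbool.h>
#include <stdio.h>

struct adapter {
	bool hs_activated;
};

/* Take the object whose state is actually touched, instead of a child
 * structure used only to reach its parent. */
static void hs_activated_event(struct adapter *adapter, bool activated)
{
	adapter->hs_activated = activated;
	printf("event: hs_%s\n", activated ? "activated" : "deactivated");
}

int main(void)
{
	struct adapter ad = { .hs_activated = false };

	hs_activated_event(&ad, true);
	hs_activated_event(&ad, false);
	return 0;
}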
@@ -1204,7 +1180,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
adapter->iface_type != MWIFIEX_USB) {
- mwifiex_hs_activated_event(priv, true);
+ mwifiex_hs_activated_event(adapter, true);
return 0;
} else {
mwifiex_dbg(adapter, CMD,
@@ -1217,11 +1193,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
if (conditions != HS_CFG_CANCEL) {
set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->iface_type == MWIFIEX_USB)
- mwifiex_hs_activated_event(priv, true);
+ mwifiex_hs_activated_event(adapter, true);
} else {
clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
if (adapter->hs_activated)
- mwifiex_hs_activated_event(priv, false);
+ mwifiex_hs_activated_event(adapter, false);
}
return 0;
@@ -1250,9 +1226,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
adapter->hs_activated = false;
clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
- mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- false);
+ mwifiex_hs_activated_event(adapter, false);
}
EXPORT_SYMBOL_GPL(mwifiex_process_hs_config);
@@ -1302,9 +1276,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
}
adapter->pm_wakeup_card_req = true;
if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags))
- mwifiex_hs_activated_event(mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY),
- true);
+ mwifiex_hs_activated_event(adapter, true);
adapter->ps_state = PS_STATE_SLEEP;
cmd->command = cpu_to_le16(command);
cmd->seq_num = cpu_to_le16(seq_num);
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index ce0d42e72e94..32c374e47794 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -480,14 +480,12 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
* - Initialize the private structure
* - Add BSS priority tables to the adapter structure
* - For each interface, send the init commands to firmware
- * - Send the first command in command pending queue, if available
*/
int mwifiex_init_fw(struct mwifiex_adapter *adapter)
{
int ret;
struct mwifiex_private *priv;
u8 i, first_sta = true;
- int is_cmd_pend_q_empty;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
@@ -509,11 +507,10 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
}
if (adapter->mfg_mode) {
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
- ret = -EINPROGRESS;
} else {
for (i = 0; i < adapter->priv_num; i++) {
ret = mwifiex_sta_init_cmd(adapter->priv[i],
- first_sta, true);
+ first_sta);
if (ret == -1)
return -1;
@@ -522,17 +519,15 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
}
spin_lock_bh(&adapter->cmd_pending_q_lock);
- is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
+ WARN_ON(!list_empty(&adapter->cmd_pending_q));
spin_unlock_bh(&adapter->cmd_pending_q_lock);
- if (!is_cmd_pend_q_empty) {
- /* Send the first command in queue and return */
- if (mwifiex_main_process(adapter) != -1)
- ret = -EINPROGRESS;
- } else {
- adapter->hw_status = MWIFIEX_HW_STATUS_READY;
- }
- return ret;
+ adapter->hw_status = MWIFIEX_HW_STATUS_READY;
+
+ if (adapter->if_ops.init_fw_port)
+ adapter->if_ops.init_fw_port(adapter);
+
+ return 0;
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 0e1f53940401..7b50a88a18e5 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -6,6 +6,7 @@
*/
#include <linux/suspend.h>
+#include <net/sock.h>
#include "main.h"
#include "wmm.h"
@@ -354,13 +355,6 @@ process_start:
if (adapter->cmd_resp_received) {
adapter->cmd_resp_received = false;
mwifiex_process_cmdresp(adapter);
-
- /* call mwifiex back when init_fw is done */
- if (adapter->hw_status == MWIFIEX_HW_STATUS_INIT_DONE) {
- adapter->hw_status = MWIFIEX_HW_STATUS_READY;
- mwifiex_init_fw_complete(adapter);
- maybe_quirk_fw_disable_ds(adapter);
- }
}
/* Check if we need to confirm Sleep Request
@@ -415,10 +409,7 @@ process_start:
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
&adapter->work_flags);
- mwifiex_hs_activated_event
- (mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY),
- false);
+ mwifiex_hs_activated_event(adapter, false);
}
}
@@ -438,10 +429,7 @@ process_start:
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
&adapter->work_flags);
- mwifiex_hs_activated_event
- (mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY),
- false);
+ mwifiex_hs_activated_event(adapter, false);
}
}
@@ -460,10 +448,7 @@ process_start:
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
&adapter->work_flags);
- mwifiex_hs_activated_event
- (mwifiex_get_priv
- (adapter, MWIFIEX_BSS_ROLE_ANY),
- false);
+ mwifiex_hs_activated_event(adapter, false);
}
}
@@ -587,21 +572,11 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto err_dnld_fw;
}
- adapter->init_wait_q_woken = false;
ret = mwifiex_init_fw(adapter);
- if (ret == -1) {
+ if (ret == -1)
goto err_init_fw;
- } else if (!ret) {
- adapter->hw_status = MWIFIEX_HW_STATUS_READY;
- goto done;
- }
- /* Wait for mwifiex_init to complete */
- if (!adapter->mfg_mode) {
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
- if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
- goto err_init_fw;
- }
+
+ maybe_quirk_fw_disable_ds(adapter);
if (!adapter->wiphy) {
if (mwifiex_register_cfg80211(adapter)) {
@@ -938,8 +913,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
multicast = is_multicast_ether_addr(skb->data);
- if (unlikely(!multicast && skb->sk &&
- skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS &&
+ if (unlikely(!multicast && sk_requests_wifi_status(skb->sk) &&
priv->adapter->fw_api_ver == MWIFIEX_FW_V15))
skb = mwifiex_clone_skb_for_tx_status(priv,
skb,
@@ -1558,7 +1532,6 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
- init_waitqueue_head(&adapter->init_wait_q);
clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
@@ -1726,7 +1699,6 @@ mwifiex_add_card(void *card, struct completion *fw_done,
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
- init_waitqueue_head(&adapter->init_wait_q);
clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
adapter->hs_activated = false;
init_waitqueue_head(&adapter->hs_activate_wait_q);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 63f1c900e096..9ac36bef980e 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -239,7 +239,6 @@ struct mwifiex_dbg {
enum MWIFIEX_HARDWARE_STATUS {
MWIFIEX_HW_STATUS_READY,
MWIFIEX_HW_STATUS_INITIALIZING,
- MWIFIEX_HW_STATUS_INIT_DONE,
MWIFIEX_HW_STATUS_RESET,
MWIFIEX_HW_STATUS_NOT_READY
};
@@ -865,8 +864,6 @@ struct mwifiex_adapter {
unsigned long work_flags;
u32 fw_release_number;
u8 intf_hdr_len;
- u16 init_wait_q_woken;
- wait_queue_head_t init_wait_q;
void *card;
struct mwifiex_if_ops if_ops;
atomic_t bypass_tx_pending;
@@ -919,7 +916,6 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *curr_cmd;
/* spin lock for command */
spinlock_t mwifiex_cmd_lock;
- u16 last_init_cmd;
struct timer_list cmd_timer;
struct list_head cmd_free_q;
/* spin lock for cmd_free_q */
@@ -1060,8 +1056,6 @@ void mwifiex_free_priv(struct mwifiex_private *priv);
int mwifiex_init_fw(struct mwifiex_adapter *adapter);
-int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
-
void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter);
int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
@@ -1125,7 +1119,7 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
struct mwifiex_ds_pm_cfg *pm_cfg);
void mwifiex_process_hs_config(struct mwifiex_adapter *adapter);
-void mwifiex_hs_activated_event(struct mwifiex_private *priv,
+void mwifiex_hs_activated_event(struct mwifiex_adapter *adapter,
u8 activated);
int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg);
@@ -1156,7 +1150,7 @@ void mwifiex_process_sta_txpd(struct mwifiex_private *priv,
struct sk_buff *skb);
void mwifiex_process_uap_txpd(struct mwifiex_private *priv,
struct sk_buff *skb);
-int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta, bool init);
+int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
struct mwifiex_scan_cmd_config *scan_cfg);
void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
@@ -1565,7 +1559,7 @@ int mwifiex_add_wowlan_magic_pkt_filter(struct mwifiex_adapter *adapter);
int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *data);
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
-u8 *mwifiex_11d_code_2_region(u8 code);
+const u8 *mwifiex_11d_code_2_region(u8 code);
void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg,
struct cfg80211_chan_def chandef);
@@ -1598,7 +1592,6 @@ mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
struct mwifiex_sta_node *
mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
-u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
u8 action_code, u8 dialog_token,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index dd2a42e732f2..a760de191fce 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2971,7 +2971,7 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
goto err_iomap2;
}
- pr_notice("PCI memory map Virt0: %pK PCI memory map Virt2: %pK\n",
+ pr_notice("PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
card->pci_mmap, card->pci_mmap1);
ret = mwifiex_pcie_alloc_buffers(adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index c4689f5a1acc..c93281f5a47c 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -157,7 +157,7 @@ mwifiex_cmd_802_11_get_log(struct host_cmd_ds_command *cmd)
*/
static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- u16 cmd_action, u16 *pbitmap_rates)
+ u16 cmd_action, const u16 *pbitmap_rates)
{
struct host_cmd_ds_tx_rate_cfg *rate_cfg = &cmd->params.tx_rate_cfg;
struct mwifiex_rate_scope *rate_scope;
@@ -174,34 +174,19 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
rate_scope->type = cpu_to_le16(TLV_TYPE_RATE_SCOPE);
rate_scope->length = cpu_to_le16
(sizeof(*rate_scope) - sizeof(struct mwifiex_ie_types_header));
- if (pbitmap_rates != NULL) {
- rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]);
- rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]);
- for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
- rate_scope->ht_mcs_rate_bitmap[i] =
- cpu_to_le16(pbitmap_rates[2 + i]);
- if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
- for (i = 0;
- i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
- i++)
- rate_scope->vht_mcs_rate_bitmap[i] =
- cpu_to_le16(pbitmap_rates[10 + i]);
- }
- } else {
- rate_scope->hr_dsss_rate_bitmap =
- cpu_to_le16(priv->bitmap_rates[0]);
- rate_scope->ofdm_rate_bitmap =
- cpu_to_le16(priv->bitmap_rates[1]);
- for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
- rate_scope->ht_mcs_rate_bitmap[i] =
- cpu_to_le16(priv->bitmap_rates[2 + i]);
- if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
- for (i = 0;
- i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
- i++)
- rate_scope->vht_mcs_rate_bitmap[i] =
- cpu_to_le16(priv->bitmap_rates[10 + i]);
- }
+ if (!pbitmap_rates)
+ pbitmap_rates = priv->bitmap_rates;
+
+ rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]);
+ rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]);
+
+ for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++)
+ rate_scope->ht_mcs_rate_bitmap[i] = cpu_to_le16(pbitmap_rates[2 + i]);
+
+ if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+ for (i = 0; i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap); i++)
+ rate_scope->vht_mcs_rate_bitmap[i] =
+ cpu_to_le16(pbitmap_rates[10 + i]);
}
rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
@@ -2258,7 +2243,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
* - Set 11d control
* - Set MAC control (this must be the last command to initialize firmware)
*/
-int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
+int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret;
@@ -2433,11 +2418,5 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG,
HostCmd_ACT_GEN_SET, 0, &tx_cfg, true);
- if (init) {
- /* set last_init_cmd before sending the command */
- priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
- ret = -EINPROGRESS;
- }
-
return ret;
}
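The mwifiex_cmd_tx_rate_cfg() hunk above folds two near-duplicate branches into one by substituting the default bitmap pointer before the copy loops run. A stand-alone sketch of that refactor shape, with assumed stand-in types and sizes:

#include <stdio.h>

#define NUM_RATES 4

struct priv { unsigned short bitmap_rates[NUM_RATES]; };

/* One code path instead of two: if the caller passes NULL, point at
 * the private defaults and run the same copy loop either way. */
static void fill_rates(struct priv *priv, unsigned short *out,
		       const unsigned short *rates)
{
	if (!rates)
		rates = priv->bitmap_rates;

	for (int i = 0; i < NUM_RATES; i++)
		out[i] = rates[i];
}

int main(void)
{
	struct priv priv = { .bitmap_rates = { 1, 2, 3, 4 } };
	unsigned short custom[NUM_RATES] = { 9, 9, 9, 9 };
	unsigned short out[NUM_RATES];

	fill_rates(&priv, out, NULL);   /* falls back to priv defaults */
	printf("default: %u %u %u %u\n", out[0], out[1], out[2], out[3]);

	fill_rates(&priv, out, custom); /* caller-supplied bitmap wins */
	printf("custom:  %u %u %u %u\n", out[0], out[1], out[2], out[3]);
	return 0;
}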
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index bd91678d26b4..f44e22f24511 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -24,8 +24,7 @@
int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ struct mwifiex_private *priv;
struct rxpd *local_rx_pd;
struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
int ret;
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 1f1f6280a0f2..4c5b1de0e936 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -116,24 +116,6 @@ static struct mwifiex_debug_data items[] = {
static int num_of_items = ARRAY_SIZE(items);
/*
- * Firmware initialization complete callback handler.
- *
- * This function wakes up the function waiting on the init
- * wait queue for the firmware initialization to complete.
- */
-int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
-{
-
- if (adapter->hw_status == MWIFIEX_HW_STATUS_READY)
- if (adapter->if_ops.init_fw_port)
- adapter->if_ops.init_fw_port(adapter);
-
- adapter->init_wait_q_woken = true;
- wake_up_interruptible(&adapter->init_wait_q);
- return 0;
-}
-
-/*
* This function sends init/shutdown command
* to firmware.
*/
@@ -663,7 +645,7 @@ u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv)
return false;
}
-u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
+static u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
{
struct mwifiex_sta_node *sta_ptr;
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index bcb61dab7dc8..1b1222c73728 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -1428,13 +1428,13 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
}
if (!ptr->is_11n_enabled ||
- ptr->ba_status ||
- priv->wps.session_enable) {
+ ptr->ba_status ||
+ priv->wps.session_enable) {
if (ptr->is_11n_enabled &&
- ptr->ba_status &&
- ptr->amsdu_in_ampdu &&
- mwifiex_is_amsdu_allowed(priv, tid) &&
- mwifiex_is_11n_aggragation_possible(priv, ptr,
+ ptr->ba_status &&
+ ptr->amsdu_in_ampdu &&
+ mwifiex_is_amsdu_allowed(priv, tid) &&
+ mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
index e7b839e74290..cc2d888e3f17 100644
--- a/drivers/net/wireless/mediatek/mt76/channel.c
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -302,11 +302,13 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt76_dev *dev = phy->dev;
- struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt76_vif_data *mvif;
if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel)
return;
+ mvif = mlink->mvif;
+
rcu_assign_pointer(mvif->offchannel_link, NULL);
dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink);
kfree(mlink);
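The mt76_put_vif_phy_link() fix above defers the mlink->mvif load until after the IS_ERR_OR_NULL() guard, so an error or NULL pointer is never dereferenced. A minimal sketch of the corrected shape (a plain NULL check stands in for IS_ERR_OR_NULL(), and the types are hypothetical):

#include <stddef.h>
#include <stdio.h>

struct vif_data { int refcount; };
struct vif_link {
	struct vif_data *mvif;
	int offchannel;
};

/* Buggy shape: initializing mvif in the declaration dereferences a
 * possibly-NULL pointer before the guard runs. Fixed shape: declare
 * first, dereference only after the check. */
static void put_link(struct vif_link *link)
{
	struct vif_data *mvif;

	if (link == NULL || !link->offchannel)
		return;

	mvif = link->mvif; /* safe: link is known non-NULL here */
	mvif->refcount--;
}

int main(void)
{
	struct vif_data data = { .refcount = 1 };
	struct vif_link link = { .mvif = &data, .offchannel = 1 };

	put_link(NULL);   /* no crash: guard runs before any dereference */
	put_link(&link);
	printf("refcount = %d\n", data.refcount);
	return 0;
}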
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 844af16ee551..35b4ec91979e 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -1011,6 +1011,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
int i;
mt76_worker_disable(&dev->tx_worker);
+ napi_disable(&dev->tx_napi);
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index b88d7e10742e..45c8db939d55 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -449,8 +449,10 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
- wiphy->available_antennas_tx = phy->antenna_mask;
- wiphy->available_antennas_rx = phy->antenna_mask;
+ if (!wiphy->available_antennas_tx)
+ wiphy->available_antennas_tx = phy->antenna_mask;
+ if (!wiphy->available_antennas_rx)
+ wiphy->available_antennas_rx = phy->antenna_mask;
wiphy->sar_capa = &mt76_sar_capa;
phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
@@ -1703,7 +1705,7 @@ s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
int n_chains = hweight16(phy->chainmask);
txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
- txpower -= mt76_tx_power_nss_delta(n_chains);
+ txpower -= mt76_tx_power_path_delta(n_chains);
return txpower;
}
@@ -1719,7 +1721,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -EINVAL;
n_chains = hweight16(phy->chainmask);
- delta = mt76_tx_power_nss_delta(n_chains);
+ delta = mt76_tx_power_path_delta(n_chains);
*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index d7cd467b812f..5f8d81cda6cd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -162,6 +162,16 @@ enum mt76_dfs_state {
MT_DFS_STATE_ACTIVE,
};
+#define MT76_RNR_SCAN_MAX_BSSIDS 16
+struct mt76_scan_rnr_param {
+ u8 bssid[MT76_RNR_SCAN_MAX_BSSIDS][ETH_ALEN];
+ u8 channel[MT76_RNR_SCAN_MAX_BSSIDS];
+ u8 random_mac[ETH_ALEN];
+ u8 seq_num;
+ u8 bssid_num;
+ u32 sreq_flag;
+};
+
struct mt76_queue_buf {
dma_addr_t addr;
u16 len:15,
@@ -941,6 +951,8 @@ struct mt76_dev {
char alpha2[3];
enum nl80211_dfs_regions region;
+ struct mt76_scan_rnr_param rnr;
+
u32 debugfs_reg;
u8 csa_complete;
@@ -1386,12 +1398,12 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
return pktid >= MT_PACKET_ID_FIRST;
}
-static inline u8 mt76_tx_power_nss_delta(u8 nss)
+static inline u8 mt76_tx_power_path_delta(u8 path)
{
- static const u8 nss_delta[4] = { 0, 6, 9, 12 };
- u8 idx = nss - 1;
+ static const u8 path_delta[5] = { 0, 6, 9, 12, 14 };
+ u8 idx = path - 1;
- return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
+ return (idx < ARRAY_SIZE(path_delta)) ? path_delta[idx] : 0;
}
static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
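The renamed mt76_tx_power_path_delta() maps a TX path count to a backoff in 0.5 dB units through a lookup table, with a fifth entry added for a 5-path configuration; note how the u8 index underflow for path == 0 falls through the bounds check and yields 0. A stand-alone sketch mirroring that logic:

#include <stdio.h>

static int tx_power_path_delta(unsigned int path)
{
	static const unsigned char path_delta[5] = { 0, 6, 9, 12, 14 };
	unsigned char idx = path - 1; /* path == 0 wraps to 255, failing the bounds check */

	return idx < sizeof(path_delta) ? path_delta[idx] : 0;
}

int main(void)
{
	for (unsigned int path = 0; path <= 5; path++)
		printf("paths=%u delta=%d (0.5 dB units)\n",
		       path, tx_power_path_delta(path));
	return 0;
}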
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 66ba3be27343..aae80005a3c1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -273,7 +273,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains;
- int delta_idx, delta = mt76_tx_power_nss_delta(n_chains);
+ int delta_idx, delta = mt76_tx_power_path_delta(n_chains);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
struct mt76_power_limits limits;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index b8fcd4eb3fbb..4064e193d4de 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -2067,7 +2067,7 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
};
tx_power = mt76_get_sar_power(mphy, mphy->chandef.chan, tx_power);
- tx_power -= mt76_tx_power_nss_delta(n_chains);
+ tx_power -= mt76_tx_power_path_delta(n_chains);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits, tx_power);
mphy->txpower_cur = tx_power;
@@ -2084,8 +2084,8 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
int delta = 0;
if (i < n_chains - 1)
- delta = mt76_tx_power_nss_delta(n_chains) -
- mt76_tx_power_nss_delta(i + 1);
+ delta = mt76_tx_power_path_delta(n_chains) -
+ mt76_tx_power_path_delta(i + 1);
sku[MT_SKU_1SS_DELTA + i] = delta;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 455979476d11..192dcc374a64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -232,9 +232,14 @@ static inline bool is_mt7992(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7992;
}
+static inline bool is_mt7990(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7993;
+}
+
static inline bool is_mt799x(struct mt76_dev *dev)
{
- return is_mt7996(dev) || is_mt7992(dev);
+ return is_mt7996(dev) || is_mt7992(dev) || is_mt7990(dev);
}
static inline bool is_mt7622(struct mt76_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 487ad716f872..1013cad57a7f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -273,6 +273,7 @@ enum tx_frag_idx {
#define MT_TXD6_TX_RATE GENMASK(21, 16)
#define MT_TXD6_TIMESTAMP_OFS_EN BIT(15)
#define MT_TXD6_TIMESTAMP_OFS_IDX GENMASK(14, 10)
+#define MT_TXD6_TID_ADDBA GENMASK(10, 8)
#define MT_TXD6_MSDU_CNT GENMASK(9, 4)
#define MT_TXD6_MSDU_CNT_V2 GENMASK(15, 10)
#define MT_TXD6_DIS_MAT BIT(3)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index bafcf5a279e2..cb13d0a76878 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -67,8 +67,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000) ||
(is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) ||
- (is_mt7996(dev) && addr == 0x900000) ||
- (is_mt7992(dev) && addr == 0x900000))
+ (is_mt799x(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@@ -391,7 +390,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
if (vif->type == NL80211_IFTYPE_STATION &&
- link_conf && !is_zero_ether_addr(link_conf->bssid)) {
+ !is_zero_ether_addr(link_conf->bssid)) {
memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN);
basic->aid = cpu_to_le16(vif->cfg.aid);
} else {
@@ -1667,6 +1666,44 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
+void mt76_connac_mcu_build_rnr_scan_param(struct mt76_dev *mdev,
+ struct cfg80211_scan_request *sreq)
+{
+ struct ieee80211_channel **scan_list = sreq->channels;
+ int i, bssid_index = 0;
+
+	/* clear the 6 GHz active scan BSSID table */
+ memset(&mdev->rnr, 0, sizeof(mdev->rnr));
+
+ for (i = 0; i < sreq->n_6ghz_params; i++) {
+ u8 ch_idx = sreq->scan_6ghz_params[i].channel_idx;
+ int k = 0;
+
+		/* Skip BSSIDs that were already collected */
+ for (k = 0; k < bssid_index; k++) {
+ if (!memcmp(&mdev->rnr.bssid[k],
+ sreq->scan_6ghz_params[i].bssid,
+ ETH_ALEN))
+ break;
+ }
+
+ if (k == bssid_index &&
+ bssid_index < MT76_RNR_SCAN_MAX_BSSIDS) {
+ memcpy(&mdev->rnr.bssid[bssid_index++],
+ sreq->scan_6ghz_params[i].bssid, ETH_ALEN);
+ mdev->rnr.channel[k] = scan_list[ch_idx]->hw_value;
+ }
+ }
+
+ mdev->rnr.bssid_num = bssid_index;
+
+ if (sreq->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ memcpy(mdev->rnr.random_mac, sreq->mac_addr, ETH_ALEN);
+ mdev->rnr.sreq_flag = sreq->flags;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_build_rnr_scan_param);
+
#define MT76_CONNAC_SCAN_CHANNEL_TIME 60
int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req)
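The duplicate filter inside mt76_connac_mcu_build_rnr_scan_param() above is a linear scan over the BSSIDs collected so far; an entry is appended only when the scan runs off the end (k == bssid_index) and the table still has room. A compilable sketch of the same loop, with stand-in constants:

#include <stdio.h>
#include <string.h>

#define MAX_BSSIDS 16
#define ETH_ALEN 6

/* Linear-scan the entries collected so far; append only if no match
 * was found and the table has room. Returns the updated count. */
static int add_unique(unsigned char table[][ETH_ALEN], int count,
		      const unsigned char *bssid)
{
	int k;

	for (k = 0; k < count; k++)
		if (!memcmp(table[k], bssid, ETH_ALEN))
			return count; /* duplicate: keep count unchanged */

	if (count < MAX_BSSIDS)
		memcpy(table[count++], bssid, ETH_ALEN);
	return count;
}

int main(void)
{
	unsigned char table[MAX_BSSIDS][ETH_ALEN];
	const unsigned char a[ETH_ALEN] = { 1, 2, 3, 4, 5, 6 };
	const unsigned char b[ETH_ALEN] = { 1, 2, 3, 4, 5, 7 };
	int n = 0;

	n = add_unique(table, n, a);
	n = add_unique(table, n, b);
	n = add_unique(table, n, a); /* ignored: already present */
	printf("unique bssids: %d\n", n);
	return 0;
}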
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 43237e518373..27daf419560a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -869,6 +869,7 @@ enum {
#define NETWORK_WDS BIT(21)
#define SCAN_FUNC_RANDOM_MAC BIT(0)
+#define SCAN_FUNC_RNR_SCAN BIT(3)
#define SCAN_FUNC_SPLIT_SCAN BIT(5)
#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
@@ -1065,6 +1066,7 @@ enum {
MCU_UNI_EVENT_WED_RRO = 0x57,
MCU_UNI_EVENT_PER_STA_INFO = 0x6d,
MCU_UNI_EVENT_ALL_STA_INFO = 0x6e,
+ MCU_UNI_EVENT_SDO = 0x83,
};
#define MCU_UNI_CMD_EVENT BIT(1)
@@ -1182,6 +1184,11 @@ enum {
#define MCU_UNI_CMD(_t) (__MCU_CMD_FIELD_UNI | \
FIELD_PREP(__MCU_CMD_FIELD_ID, \
MCU_UNI_CMD_##_t))
+
+#define MCU_UNI_QUERY(_t) (__MCU_CMD_FIELD_UNI | __MCU_CMD_FIELD_QUERY | \
+ FIELD_PREP(__MCU_CMD_FIELD_ID, \
+ MCU_UNI_CMD_##_t))
+
#define MCU_CE_CMD(_t) (__MCU_CMD_FIELD_CE | \
FIELD_PREP(__MCU_CMD_FIELD_ID, \
MCU_CE_CMD_##_t))
@@ -1287,16 +1294,20 @@ enum {
MCU_UNI_CMD_EFUSE_CTRL = 0x2d,
MCU_UNI_CMD_RA = 0x2f,
MCU_UNI_CMD_MURU = 0x31,
+ MCU_UNI_CMD_TESTMODE_RX_STAT = 0x32,
MCU_UNI_CMD_BF = 0x33,
MCU_UNI_CMD_CHANNEL_SWITCH = 0x34,
MCU_UNI_CMD_THERMAL = 0x35,
MCU_UNI_CMD_VOW = 0x37,
MCU_UNI_CMD_FIXED_RATE_TABLE = 0x40,
+ MCU_UNI_CMD_TESTMODE_CTRL = 0x46,
MCU_UNI_CMD_RRO = 0x57,
MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58,
MCU_UNI_CMD_PER_STA_INFO = 0x6d,
MCU_UNI_CMD_ALL_STA_INFO = 0x6e,
MCU_UNI_CMD_ASSERT_DUMP = 0x6f,
+ MCU_UNI_CMD_RADIO_STATUS = 0x80,
+ MCU_UNI_CMD_SDO = 0x88,
};
enum {
@@ -1369,6 +1380,7 @@ enum {
UNI_BSS_INFO_OFFLOAD = 25,
UNI_BSS_INFO_MLD = 26,
UNI_BSS_INFO_PM_DISABLE = 27,
+ UNI_BSS_INFO_EHT = 30,
};
enum {
@@ -1969,6 +1981,8 @@ int mt76_connac_mcu_start_patch(struct mt76_dev *dev);
int mt76_connac_mcu_patch_sem_ctrl(struct mt76_dev *dev, bool get);
int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option);
+void mt76_connac_mcu_build_rnr_scan_param(struct mt76_dev *mdev,
+ struct cfg80211_scan_request *sreq);
int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
int mt76_connac_mcu_cancel_hw_scan(struct mt76_phy *phy,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index a82c75ba26e6..a683d53c7ceb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -174,7 +174,6 @@ static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
- struct mt76_txwi_cache __maybe_unused *t;
int i, ret, fifo_size;
struct mt76_queue *q;
void *status_fifo;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 84ef80ab4afb..96cecc576a98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -17,6 +17,8 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
+ { USB_DEVICE(0x0471, 0x2126) }, /* LiteOn WN4516R module, nonstandard USB connector */
+ { USB_DEVICE(0x0471, 0x7600) }, /* LiteOn WN4519R module, nonstandard USB connector */
{ USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
{ USB_DEVICE(0x0846, 0x9014) }, /* Netgear WNDA3100v3 */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 33a14365ec9b..3b5562811511 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -191,6 +191,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
struct mt76_usb *usb = &dev->mt76.usb;
+ bool vht;
int err;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
@@ -217,7 +218,17 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
/* check hw sg support in order to enable AMSDU */
hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
- err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
+ switch (dev->mt76.rev) {
+ case 0x76320044:
+ /* these ASIC revisions do not support VHT */
+ vht = false;
+ break;
+ default:
+ vht = true;
+ break;
+ }
+
+ err = mt76_register_device(&dev->mt76, vht, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (err)
goto fail;
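The switch added in mt76x2u_register_device() gates VHT registration on the ASIC revision, defaulting to enabled and opting out only for revisions known not to support it. A trivial stand-alone rendering of that gate; the listed revision comes from the hunk above, and treating every other revision as VHT-capable is the assumed default:

#include <stdio.h>

static int revision_supports_vht(unsigned int rev)
{
	switch (rev) {
	case 0x76320044: /* non-VHT revision named in the hunk above */
		return 0;
	default:
		return 1;
	}
}

int main(void)
{
	printf("rev 0x76320044: vht=%d\n", revision_supports_vht(0x76320044));
	printf("rev 0x76120044: vht=%d\n", revision_supports_vht(0x76120044));
	return 0;
}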
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 192e8eff970b..b287b7d9394e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -211,13 +211,28 @@ static const struct file_operations mt7915_sys_recovery_ops = {
static int
mt7915_radar_trigger(void *data, u64 val)
{
- struct mt7915_dev *dev = data;
+#define RADAR_MAIN_CHAIN 1
+#define RADAR_BACKGROUND 2
+ struct mt7915_phy *phy = data;
+ struct mt7915_dev *dev = phy->dev;
+ int rdd_idx;
+
+ if (!val || val > RADAR_BACKGROUND)
+ return -EINVAL;
- if (val > MT_RX_SEL2)
+ if (val == RADAR_BACKGROUND && !dev->rdd2_phy) {
+ dev_err(dev->mt76.dev, "Background radar is not enabled\n");
return -EINVAL;
+ }
+
+ rdd_idx = mt7915_get_rdd_idx(phy, val == RADAR_BACKGROUND);
+ if (rdd_idx < 0) {
+ dev_err(dev->mt76.dev, "No RDD found\n");
+ return -EINVAL;
+ }
return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE,
- val, 0, 0);
+ rdd_idx, 0, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
@@ -445,6 +460,11 @@ mt7915_rdd_monitor(struct seq_file *s, void *data)
mutex_lock(&dev->mt76.mutex);
+ if (!mt7915_eeprom_has_background_radar(dev)) {
+ seq_puts(s, "no background radar capability\n");
+ goto out;
+ }
+
if (!cfg80211_chandef_valid(chandef)) {
ret = -EINVAL;
goto out;
@@ -1242,7 +1262,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy)
if (!dev->dbdc_support || phy->mt76->band_idx) {
debugfs_create_u32("dfs_hw_pattern", 0400, dir,
&dev->hw_pattern);
- debugfs_create_file("radar_trigger", 0200, dir, dev,
+ debugfs_create_file("radar_trigger", 0200, dir, phy,
&fops_radar_trigger);
debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
mt7915_rdd_monitor);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 928e0b07a9bf..c0f3402d30bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -147,7 +147,7 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
/* read eeprom data from efuse */
block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size);
for (i = 0; i < block_num; i++) {
- ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size);
+ ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size, NULL);
if (ret < 0)
return ret;
}
@@ -361,6 +361,37 @@ s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta;
}
+bool
+mt7915_eeprom_has_background_radar(struct mt7915_dev *dev)
+{
+ u8 val, buf[MT7915_EEPROM_BLOCK_SIZE];
+ u8 band_sel, tx_path, rx_path;
+ int offs = MT_EE_WIFI_CONF + 1;
+
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ return true;
+ case 0x7906:
+ /* read efuse to check background radar capability */
+ if (mt7915_mcu_get_eeprom(dev, offs, buf))
+ break;
+
+ val = buf[offs % MT7915_EEPROM_BLOCK_SIZE];
+ band_sel = u8_get_bits(val, MT_EE_WIFI_CONF0_BAND_SEL);
+ tx_path = u8_get_bits(val, MT_EE_WIFI_CONF0_TX_PATH);
+ rx_path = u8_get_bits(val, MT_EE_WIFI_CONF0_RX_PATH);
+
+ return (band_sel == MT_EE_V2_BAND_SEL_5GHZ &&
+ tx_path == rx_path && rx_path == 2);
+ case 0x7981:
+ case 0x7986:
+ default:
+ break;
+ }
+
+ return false;
+}
+
const u8 mt7915_sku_group_len[] = {
[SKU_CCK] = 4,
[SKU_OFDM] = 8,
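mt7915_eeprom_has_background_radar() above decodes a single efuse byte with GENMASK-style fields: per the eeprom.h hunk that follows, the TX path count lives in bits 2:0, the RX path count in bits 5:3 and the band select in bits 7:6. A user-space sketch of the extraction; the shift/width values mirror those masks, while the band-select encoding of 2 for 5 GHz is an assumption for illustration only:

#include <stdio.h>

#define CONF0_TX_PATH_SHIFT  0 /* bits 2:0 */
#define CONF0_RX_PATH_SHIFT  3 /* bits 5:3 */
#define CONF0_BAND_SEL_SHIFT 6 /* bits 7:6 */

static unsigned int get_bits(unsigned char val, int shift, int width)
{
	return (val >> shift) & ((1u << width) - 1);
}

int main(void)
{
	/* band_sel=2 (assumed 5 GHz), rx=2, tx=2 -> 0b10 010 010 = 0x92 */
	unsigned char val = 0x92;
	unsigned int tx = get_bits(val, CONF0_TX_PATH_SHIFT, 3);
	unsigned int rx = get_bits(val, CONF0_RX_PATH_SHIFT, 3);
	unsigned int band = get_bits(val, CONF0_BAND_SEL_SHIFT, 2);

	printf("tx=%u rx=%u band_sel=%u -> background radar %s\n",
	       tx, rx, band,
	       (band == 2 && tx == rx && rx == 2) ? "supported" : "not supported");
	return 0;
}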
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 509fb43d8a68..31aec0f40232 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -55,6 +55,7 @@ enum mt7915_eeprom_field {
#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
+#define MT_EE_WIFI_CONF0_RX_PATH GENMASK(5, 3)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index bee4beabc4eb..3e30ca5155d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -285,7 +285,7 @@ static void __mt7915_init_txpower(struct mt7915_phy *phy,
{
struct mt7915_dev *dev = phy->dev;
int i, n_chains = hweight16(phy->mt76->chainmask);
- int nss_delta = mt76_tx_power_nss_delta(n_chains);
+ int path_delta = mt76_tx_power_path_delta(n_chains);
int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band);
struct mt76_power_limits limits;
@@ -305,7 +305,7 @@ static void __mt7915_init_txpower(struct mt7915_phy *phy,
target_power = mt76_get_rate_power_limits(phy->mt76, chan,
&limits,
target_power);
- target_power += nss_delta;
+ target_power += path_delta;
target_power = DIV_ROUND_UP(target_power, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
@@ -392,9 +392,10 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
if (!is_mt7915(&dev->mt76))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
- if (!mdev->dev->of_node ||
- !of_property_read_bool(mdev->dev->of_node,
- "mediatek,disable-radar-background"))
+ if (mt7915_eeprom_has_background_radar(phy->dev) &&
+ (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background")))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_RADAR_BACKGROUND);
@@ -924,8 +925,7 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US;
if (!is_mt7915(&dev->mt76))
- c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
- IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
elem->phy_cap_info[2] |= c;
c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 2ba6eb3038ce..9400e4af2a04 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -2035,16 +2035,15 @@ void mt7915_mac_work(struct work_struct *work)
static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
+ int rdd_idx = mt7915_get_rdd_idx(phy, false);
- if (phy->rdd_state & BIT(0))
- mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
- MT_RX_SEL0, 0);
- if (phy->rdd_state & BIT(1))
- mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
- MT_RX_SEL0, 0);
+ if (rdd_idx < 0)
+ return;
+
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, rdd_idx, 0, 0);
}
-static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
+static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int rdd_idx)
{
int err, region;
@@ -2061,52 +2060,38 @@ static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
break;
}
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
- MT_RX_SEL0, region);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, rdd_idx, 0, region);
if (err < 0)
return err;
if (is_mt7915(&dev->mt76)) {
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, rdd_idx,
0, dev->dbdc_support ? 2 : 0);
if (err < 0)
return err;
}
- return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, rdd_idx, 0, 1);
}
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
- int err;
+ int err, rdd_idx;
+
+ rdd_idx = mt7915_get_rdd_idx(phy, false);
+ if (rdd_idx < 0)
+ return -EINVAL;
/* start CAC */
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, rdd_idx, 0, 0);
if (err < 0)
return err;
- err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx);
+ err = mt7915_dfs_start_rdd(dev, rdd_idx);
if (err < 0)
return err;
- phy->rdd_state |= BIT(phy->mt76->band_idx);
-
- if (!is_mt7915(&dev->mt76))
- return 0;
-
- if (chandef->width == NL80211_CHAN_WIDTH_160 ||
- chandef->width == NL80211_CHAN_WIDTH_80P80) {
- err = mt7915_dfs_start_rdd(dev, 1);
- if (err < 0)
- return err;
-
- phy->rdd_state |= BIT(1);
- }
-
return 0;
}
@@ -2148,12 +2133,12 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
enum mt76_dfs_state dfs_state, prev_state;
- int err;
+ int err, rdd_idx = mt7915_get_rdd_idx(phy, false);
prev_state = phy->mt76->dfs_state;
dfs_state = mt76_phy_dfs_state(phy->mt76);
- if (prev_state == dfs_state)
+ if (prev_state == dfs_state || rdd_idx < 0)
return 0;
if (prev_state == MT_DFS_STATE_UNKNOWN)
@@ -2177,8 +2162,7 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
if (dfs_state == MT_DFS_STATE_CAC)
return 0;
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END, rdd_idx, 0, 0);
if (err < 0) {
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
return err;
@@ -2188,15 +2172,13 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
return 0;
stop:
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, rdd_idx, 0, 0);
if (err < 0)
return err;
if (is_mt7915(&dev->mt76)) {
err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
- phy->mt76->band_idx, 0,
- dev->dbdc_support ? 2 : 0);
+ rdd_idx, 0, dev->dbdc_support ? 2 : 0);
if (err < 0)
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 3643c72bb68d..427542777abc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -303,17 +303,35 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt7915_mcu_rdd_report *r;
+ u32 sku;
r = (struct mt7915_mcu_rdd_report *)skb->data;
- if (r->band_idx > MT_RX_SEL2)
+ switch (r->rdd_idx) {
+ case MT_RDD_IDX_BAND0:
+ break;
+ case MT_RDD_IDX_BAND1:
+ sku = mt7915_check_adie(dev, true);
+ /* the main phy is bound to band 1 for this sku */
+ if (is_mt7986(&dev->mt76) &&
+ (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE))
+ break;
+ mphy = dev->mt76.phys[MT_BAND1];
+ break;
+ case MT_RDD_IDX_BACKGROUND:
+ if (!dev->rdd2_phy)
+ return;
+ mphy = dev->rdd2_phy->mt76;
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Unknown RDD idx %d\n", r->rdd_idx);
return;
+ }
- if ((r->band_idx && !dev->phy.mt76->band_idx) &&
- dev->mt76.phys[MT_BAND1])
- mphy = dev->mt76.phys[MT_BAND1];
+ if (!mphy)
+ return;
- if (r->band_idx == MT_RX_SEL2)
+ if (r->rdd_idx == MT_RDD_IDX_BACKGROUND)
cfg80211_background_radar_event(mphy->hw->wiphy,
&dev->rdd2_chandef,
GFP_ATOMIC);
@@ -2697,11 +2715,14 @@ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
struct cfg80211_chan_def *chandef)
{
struct mt7915_dev *dev = phy->dev;
- int err, region;
+ int err, region, rdd_idx;
+
+ rdd_idx = mt7915_get_rdd_idx(phy, true);
+ if (rdd_idx < 0)
+ return -EINVAL;
if (!chandef) { /* disable offchain */
- err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2,
- 0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, rdd_idx, 0, 0);
if (err)
return err;
@@ -2727,8 +2748,7 @@ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
break;
}
- return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2,
- 0, region);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, rdd_idx, 0, region);
}
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
@@ -2859,7 +2879,7 @@ int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
&req, sizeof(req), true);
}
-int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf)
{
struct mt7915_mcu_eeprom_info req = {
.addr = cpu_to_le32(round_down(offset,
@@ -2867,8 +2887,8 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
};
struct mt7915_mcu_eeprom_info *res;
struct sk_buff *skb;
+ u8 *buf = read_buf;
int ret;
- u8 *buf;
ret = mt76_mcu_send_and_get_msg(&dev->mt76,
MCU_EXT_QUERY(EFUSE_ACCESS),
@@ -2877,8 +2897,10 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
return ret;
res = (struct mt7915_mcu_eeprom_info *)skb->data;
- buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
+ if (!buf)
+ buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE);
+
dev_kfree_skb(skb);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 092ed504a8f2..086ad89ecd91 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -57,7 +57,7 @@ struct mt7915_mcu_bcc_notify {
struct mt7915_mcu_rdd_report {
struct mt76_connac2_mcu_rxd_hdr rxd;
- u8 band_idx;
+ u8 rdd_idx;
u8 long_detected;
u8 constant_prf_detected;
u8 staggered_prf_detected;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 876f0692850a..9c4d5cea0c42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -651,6 +651,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.base = devm_ioremap(dev->mt76.dev,
pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
+ if (!wed->wlan.base)
+ return -ENOMEM;
+
wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) +
MT_INT_WED_SOURCE_CSR;
@@ -678,6 +681,9 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.bus_type = MTK_WED_BUS_AXI;
wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start,
resource_size(res));
+ if (!wed->wlan.base)
+ return -ENOMEM;
+
wed->wlan.phy_base = res->start;
wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR;
wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 533939f2b7ed..2e94347c46d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -215,8 +215,6 @@ struct mt7915_phy {
s16 coverage_class;
u8 slottime;
- u8 rdd_state;
-
u32 trb_ts;
u32 rx_ampdu_ts;
@@ -331,10 +329,10 @@ enum {
__MT_WFDMA_MAX,
};
-enum {
- MT_RX_SEL0,
- MT_RX_SEL1,
- MT_RX_SEL2, /* monitor chain */
+enum rdd_idx {
+ MT_RDD_IDX_BAND0, /* RDD idx for band idx 0 (single-band) */
+ MT_RDD_IDX_BAND1, /* RDD idx for band idx 1 */
+ MT_RDD_IDX_BACKGROUND, /* RDD idx for background chain */
};
enum mt7915_rdd_cmd {
@@ -354,6 +352,18 @@ enum mt7915_rdd_cmd {
RDD_IRQ_OFF,
};
+static inline int
+mt7915_get_rdd_idx(struct mt7915_phy *phy, bool is_background)
+{
+ if (!phy->mt76->cap.has_5ghz)
+ return -1;
+
+ if (is_background)
+ return MT_RDD_IDX_BACKGROUND;
+
+ return phy->mt76->band_idx;
+}
+
static inline struct mt7915_phy *
mt7915_hw_phy(struct ieee80211_hw *hw)
{
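The new mt7915_get_rdd_idx() helper above centralizes the detector-index selection that used to be scattered across MT_RX_SEL* call sites: no detector without 5 GHz support, a fixed slot for the background chain, otherwise the band index. A sketch of the decision logic with plain ints standing in for the phy state:

#include <stdio.h>

enum rdd_idx {
	RDD_IDX_BAND0,      /* detector for band index 0 */
	RDD_IDX_BAND1,      /* detector for band index 1 */
	RDD_IDX_BACKGROUND, /* dedicated background chain */
};

static int get_rdd_idx(int has_5ghz, int band_idx, int is_background)
{
	if (!has_5ghz)
		return -1; /* radar detection only exists on 5 GHz-capable phys */
	if (is_background)
		return RDD_IDX_BACKGROUND;
	return band_idx;
}

int main(void)
{
	printf("2.4 GHz-only phy   -> %d\n", get_rdd_idx(0, 0, 0));
	printf("band 1, foreground -> %d\n", get_rdd_idx(1, 1, 0));
	printf("band 0, background -> %d\n", get_rdd_idx(1, 0, 1));
	return 0;
}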
@@ -425,6 +435,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
+bool mt7915_eeprom_has_background_radar(struct mt7915_dev *dev);
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
@@ -473,7 +484,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
struct ieee80211_sta *sta,
void *data, u32 field);
int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
-int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf);
int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
bool hdr_trans);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 826c48a2ee69..1fffa43379b2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -83,6 +83,11 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+
+ if (is_mt7922(phy->mt76->dev)) {
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[1] |=
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/Makefile b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile
index d321e4ed732f..ade5e647c941 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_MT7925E) += mt7925e.o
obj-$(CONFIG_MT7925U) += mt7925u.o
mt7925-common-y := mac.o mcu.o main.o init.o debugfs.o
+mt7925-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt7925e-y := pci.o pci_mac.o pci_mcu.o
mt7925u-y := usb.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 63cb08f4d87c..2a83ff59a968 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -89,7 +89,7 @@ void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2)
}
/* Check the last one */
- if (rule->flag && BIT(0))
+ if (rule->flag & BIT(0))
break;
pos += sizeof(*rule);
@@ -322,6 +322,12 @@ static void mt7925_init_work(struct work_struct *work)
return;
}
+ ret = mt7925_mcu_set_thermal_protect(dev);
+ if (ret) {
+ dev_err(dev->mt76.dev, "thermal protection enable failed\n");
+ return;
+ }
+
/* we support chip reset now */
dev->hw_init_done = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 66f327781947..94b0099dcd41 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -334,6 +334,9 @@ int __mt7925_start(struct mt792x_phy *phy)
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT792x_WATCHDOG_TIME);
+ if (phy->chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN)
+ wiphy_rfkill_start_polling(mphy->hw->wiphy);
+
return 0;
}
EXPORT_SYMBOL_GPL(__mt7925_start);
@@ -1865,6 +1868,10 @@ mt7925_change_chanctx(struct ieee80211_hw *hw,
link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
mt7925_mcu_set_chctx(mvif->phy->mt76, &mconf->mt76,
link_conf, ctx);
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
+ mt7925_mcu_set_eht_pp(mvif->phy->mt76, &mconf->mt76,
+ link_conf, ctx);
}
}
@@ -1954,8 +1961,10 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
struct mt792x_phy *phy = mt792x_hw_phy(hw);
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_bss_conf *mconf;
+ struct ieee80211_bss_conf *link_conf;
mconf = mt792x_vif_to_link(mvif, info->link_id);
+ link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
mt792x_mutex_acquire(dev);
@@ -1997,6 +2006,10 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS;
}
+ if (changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
+ mt7925_mcu_set_eht_pp(mvif->phy->mt76, &mconf->mt76,
+ link_conf, NULL);
+
mt792x_mutex_release(dev);
}
@@ -2195,6 +2208,18 @@ static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static void mt7925_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ int ret;
+
+ mt792x_mutex_acquire(phy->dev);
+ ret = mt7925_mcu_wf_rf_pin_ctrl(phy);
+ mt792x_mutex_release(phy->dev);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, ret == 0);
+}
+
const struct ieee80211_ops mt7925_ops = {
.tx = mt792x_tx,
.start = mt7925_start,
@@ -2234,6 +2259,8 @@ const struct ieee80211_ops mt7925_ops = {
.sta_statistics = mt792x_sta_statistics,
.sched_scan_start = mt7925_start_sched_scan,
.sched_scan_stop = mt7925_stop_sched_scan,
+ CFG80211_TESTMODE_CMD(mt7925_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(mt7925_testmode_dump)
#ifdef CONFIG_PM
.suspend = mt7925_suspend,
.resume = mt7925_resume,
@@ -2255,6 +2282,7 @@ const struct ieee80211_ops mt7925_ops = {
.link_info_changed = mt7925_link_info_changed,
.change_vif_links = mt7925_change_vif_links,
.change_sta_links = mt7925_change_sta_links,
+ .rfkill_poll = mt7925_rfkill_poll,
};
EXPORT_SYMBOL_GPL(mt7925_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index e61da76b2097..b8542be0d945 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -783,7 +783,7 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
int ret;
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(WSYS_CONFIG),
- &req, sizeof(req), false, NULL);
+ &req, sizeof(req), true, NULL);
return ret;
}
@@ -974,6 +974,23 @@ int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable)
}
EXPORT_SYMBOL_GPL(mt7925_mcu_set_deep_sleep);
+int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev)
+{
+ char cmd[64];
+ int ret = 0;
+
+ snprintf(cmd, sizeof(cmd), "ThermalProtGband %d %d %d %d %d %d %d %d %d %d",
+ 0, 100, 90, 80, 30, 1, 1, 115, 105, 5);
+ ret = mt7925_mcu_chip_config(dev, cmd);
+
+ snprintf(cmd, sizeof(cmd), "ThermalProtAband %d %d %d %d %d %d %d %d %d %d",
+ 1, 100, 90, 80, 30, 1, 1, 115, 105, 5);
+ ret |= mt7925_mcu_chip_config(dev, cmd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt7925_mcu_set_thermal_protect);
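
mt7925_mcu_set_thermal_protect() drives the firmware through the textual chip-config channel rather than a binary TLV: one whitespace-separated decimal command per band. A standalone sketch of assembling such a string; the parameter names below are guesses, since the driver only passes bare numbers:

#include <stdio.h>

int main(void)
{
	char cmd[64];
	/* names are assumptions; only the values appear in the driver */
	int band = 0, duty[4] = { 100, 90, 80, 30 };
	int enable = 1, type = 1, trig = 115, restore = 105, interval = 5;

	snprintf(cmd, sizeof(cmd),
		 "ThermalProtGband %d %d %d %d %d %d %d %d %d %d",
		 band, duty[0], duty[1], duty[2], duty[3],
		 enable, type, trig, restore, interval);
	puts(cmd);	/* handed to the chip-config MCU command in the driver */
	return 0;
}
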
+
int mt7925_run_firmware(struct mt792x_dev *dev)
{
int err;
@@ -1424,7 +1441,7 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
};
return mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_CMD(EFUSE_CTRL),
- &req, sizeof(req), false, NULL);
+ &req, sizeof(req), true, NULL);
}
EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom);
@@ -1924,14 +1941,14 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta);
}
-
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
}
if (!info->enable) {
mt7925_mcu_sta_remove_tlv(skb);
mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF,
sizeof(struct tlv));
+ } else {
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
}
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@@ -2046,8 +2063,6 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
},
};
- mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true);
-
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
true);
}
@@ -2298,6 +2313,40 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int
return skb;
}
+static
+void mt7925_mcu_bss_eht_tlv(struct sk_buff *skb, struct mt76_phy *phy,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def :
+ &link_conf->chanreq.oper;
+
+ struct bss_eht_tlv *req;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_EHT, sizeof(*req));
+ req = (struct bss_eht_tlv *)tlv;
+ req->is_eth_dscb_present = chandef->punctured ? 1 : 0;
+ req->eht_dis_sub_chan_bitmap = cpu_to_le16(chandef->punctured);
+}
+
+int mt7925_mcu_set_eht_pp(struct mt76_phy *phy, struct mt76_vif_link *mvif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct sk_buff *skb;
+
+ skb = __mt7925_mcu_alloc_bss_req(phy->dev, mvif,
+ MT7925_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7925_mcu_bss_eht_tlv(skb, phy, link_conf, ctx);
+
+ return mt76_mcu_skb_send_msg(phy->dev, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
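The new BSS EHT TLV above forwards cfg80211's preamble-puncturing bitmap: bit N set means the Nth 20 MHz subchannel of the operating width, lowest frequency first, is disabled. A standalone sketch of decoding such a bitmap:

#include <stdio.h>

int main(void)
{
	unsigned short punctured = 0x0004;	/* e.g. third 20 MHz slice off */
	int width_mhz = 160;

	for (int i = 0; i < width_mhz / 20; i++)
		printf("20 MHz subchannel %d: %s\n", i,
		       (punctured >> i) & 1 ? "punctured" : "active");
	return 0;
}
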
int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
@@ -2764,7 +2813,7 @@ int mt7925_mcu_set_dbdc(struct mt76_phy *phy, bool enable)
conf->band = 0; /* unused */
err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SET_DBDC_PARMS),
- false);
+ true);
return err;
}
@@ -2779,7 +2828,6 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_dev *mdev = phy->dev;
struct mt76_connac_mcu_scan_channel *chan;
struct sk_buff *skb;
-
struct scan_hdr_tlv *hdr;
struct scan_req_tlv *req;
struct scan_ssid_tlv *ssid;
@@ -2790,9 +2838,12 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct tlv *tlv;
int max_len;
+ if (test_bit(MT76_HW_SCANNING, &phy->state))
+ return -EBUSY;
+
max_len = sizeof(*hdr) + sizeof(*req) + sizeof(*ssid) +
- sizeof(*bssid) + sizeof(*chan_info) +
- sizeof(*misc) + sizeof(*ie);
+ sizeof(*bssid) * MT7925_RNR_SCAN_MAX_BSSIDS +
+ sizeof(*chan_info) + sizeof(*misc) + sizeof(*ie);
skb = mt76_mcu_msg_alloc(mdev, NULL, max_len);
if (!skb)
@@ -2815,6 +2866,8 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
for (i = 0; i < sreq->n_ssids; i++) {
if (!sreq->ssids[i].ssid_len)
continue;
+ if (i > MT7925_RNR_SCAN_MAX_BSSIDS)
+ break;
ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len);
memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid,
@@ -2824,10 +2877,31 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
ssid->ssid_type = n_ssids ? BIT(2) : BIT(0);
ssid->ssids_num = n_ssids;
- tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, sizeof(*bssid));
- bssid = (struct scan_bssid_tlv *)tlv;
+ if (sreq->n_6ghz_params) {
+ u8 j;
+
+ mt76_connac_mcu_build_rnr_scan_param(mdev, sreq);
+
+ for (j = 0; j < mdev->rnr.bssid_num; j++) {
+ if (j > MT7925_RNR_SCAN_MAX_BSSIDS)
+ break;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID,
+ sizeof(*bssid));
+ bssid = (struct scan_bssid_tlv *)tlv;
- memcpy(bssid->bssid, sreq->bssid, ETH_ALEN);
+ ether_addr_copy(bssid->bssid, mdev->rnr.bssid[j]);
+ bssid->match_ch = mdev->rnr.channel[j];
+ bssid->match_ssid_ind = MT7925_RNR_SCAN_MAX_BSSIDS;
+ bssid->match_short_ssid_ind = MT7925_RNR_SCAN_MAX_BSSIDS;
+ }
+ req->scan_func |= SCAN_FUNC_RNR_SCAN;
+ } else {
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_BSSID, sizeof(*bssid));
+ bssid = (struct scan_bssid_tlv *)tlv;
+
+ ether_addr_copy(bssid->bssid, sreq->bssid);
+ }
tlv = mt76_connac_mcu_add_tlv(skb, UNI_SCAN_CHANNEL, sizeof(*chan_info));
chan_info = (struct scan_chan_info_tlv *)tlv;
@@ -2869,7 +2943,7 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
err = mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
- false);
+ true);
if (err < 0)
clear_bit(MT76_HW_SCANNING, &phy->state);
@@ -2975,7 +3049,7 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
}
return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
- false);
+ true);
}
EXPORT_SYMBOL_GPL(mt7925_mcu_sched_scan_req);
@@ -3011,7 +3085,7 @@ mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
clear_bit(MT76_HW_SCHED_SCANNING, &phy->state);
return mt76_mcu_skb_send_msg(mdev, skb, MCU_UNI_CMD(SCAN_REQ),
- false);
+ true);
}
int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
@@ -3050,7 +3124,7 @@ int mt7925_mcu_cancel_hw_scan(struct mt76_phy *phy,
}
return mt76_mcu_send_msg(phy->dev, MCU_UNI_CMD(SCAN_REQ),
- &req, sizeof(req), false);
+ &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7925_mcu_cancel_hw_scan);
@@ -3155,7 +3229,7 @@ int mt7925_mcu_set_channel_domain(struct mt76_phy *phy)
memcpy(__skb_push(skb, sizeof(req)), &req, sizeof(req));
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(SET_DOMAIN_INFO),
- false);
+ true);
}
EXPORT_SYMBOL_GPL(mt7925_mcu_set_channel_domain);
@@ -3305,9 +3379,18 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
- if (cmd == MCU_UNI_CMD(HIF_CTRL))
+ if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
+ cmd == MCU_UNI_CMD(CHIP_CONFIG))
uni_txd->option &= ~MCU_CMD_ACK;
+ if (mcu_cmd == MCU_UNI_CMD_TESTMODE_CTRL ||
+ mcu_cmd == MCU_UNI_CMD_TESTMODE_RX_STAT) {
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ uni_txd->option = 0x2;
+ else
+ uni_txd->option = 0x6;
+ }
+
goto exit;
}
@@ -3599,6 +3682,43 @@ int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy)
return 0;
}
+int mt7925_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy)
+{
+#define UNI_CMD_RADIO_STATUS_GET 0
+ struct mt792x_dev *dev = phy->dev;
+ struct sk_buff *skb;
+ int ret;
+ struct {
+ __le16 tag;
+ __le16 len;
+ u8 rsv[4];
+ } __packed req = {
+ .tag = UNI_CMD_RADIO_STATUS_GET,
+ .len = cpu_to_le16(sizeof(req)),
+ };
+ struct mt7925_radio_status_event {
+ __le16 tag;
+ __le16 len;
+
+ u8 data;
+ u8 rsv[3];
+ } __packed *status;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_UNI_CMD(RADIO_STATUS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ skb_pull(skb, sizeof(struct tlv));
+ status = (struct mt7925_radio_status_event *)skb->data;
+ ret = status->data;
+
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
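The radio-status query above returns a TLV-framed event: the code strips the outer struct tlv header with skb_pull() and overlays a packed struct on the remainder. A standalone sketch of that parse, with the layout copied from mt7925_radio_status_event above and made-up sample bytes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct radio_status_event {
	uint16_t tag;
	uint16_t len;
	uint8_t data;
	uint8_t rsv[3];
} __attribute__((packed));

int main(void)
{
	/* 4-byte outer TLV header, then the event body (little-endian) */
	uint8_t buf[] = { 0, 0, 8, 0,  0, 0, 8, 0,  1, 0, 0, 0 };
	struct radio_status_event ev;

	memcpy(&ev, buf + 4, sizeof(ev));	/* the skb_pull(sizeof(struct tlv)) step */
	printf("radio on: %u\n", (unsigned)ev.data);
	return 0;
}
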
int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 8ac43feb26d6..ee6fb16e83c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -104,13 +104,6 @@ enum {
MT7925_TM_WIFISPECTRUM,
};
-struct mt7925_rftest_cmd {
- u8 action;
- u8 rsv[3];
- __le32 param0;
- __le32 param1;
-} __packed;
-
struct mt7925_rftest_evt {
__le32 param0;
__le32 param1;
@@ -196,6 +189,7 @@ enum {
UNI_SNIFFER_CONFIG,
};
+#define MT7925_RNR_SCAN_MAX_BSSIDS 10
struct scan_hdr_tlv {
/* fixed field */
u8 seq_num;
@@ -223,7 +217,7 @@ struct scan_req_tlv {
__le16 timeout_value;
__le16 probe_delay_time;
__le32 func_mask_ext;
-};
+} __packed;
struct scan_ssid_tlv {
__le16 tag;
@@ -235,9 +229,10 @@ struct scan_ssid_tlv {
* BIT(2) + ssid_type_ext BIT(0) specified SSID only
*/
u8 ssids_num;
- u8 pad[2];
- struct mt76_connac_mcu_scan_ssid ssids[4];
-};
+ u8 is_short_ssid;
+ u8 pad;
+ struct mt76_connac_mcu_scan_ssid ssids[MT7925_RNR_SCAN_MAX_BSSIDS];
+} __packed;
struct scan_bssid_tlv {
__le16 tag;
@@ -247,8 +242,9 @@ struct scan_bssid_tlv {
u8 match_ch;
u8 match_ssid_ind;
u8 rcpi;
- u8 pad[3];
-};
+ u8 match_short_ssid_ind;
+ u8 pad[2];
+} __packed;
struct scan_chan_info_tlv {
__le16 tag;
@@ -264,7 +260,7 @@ struct scan_chan_info_tlv {
u8 channels_num; /* valid when channel_type is 4 */
u8 pad[2];
struct mt76_connac_mcu_scan_channel channels[64];
-};
+} __packed;
struct scan_ie_tlv {
__le16 tag;
@@ -372,6 +368,19 @@ struct bss_mld_tlv {
u8 __rsv[3];
} __packed;
+struct bss_eht_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 is_eht_op_present;
+ u8 is_eth_dscb_present;
+ u8 eht_ctrl;
+ u8 eht_ccfs0;
+ u8 eht_ccfs1;
+ u8 pad1;
+ __le16 eht_dis_sub_chan_bitmap;
+ u8 pad2[4];
+} __packed;
+
struct sta_rec_ba_uni {
__le16 tag;
__le16 len;
@@ -589,6 +598,47 @@ struct roc_acquire_tlv {
u8 rsv[3];
} __packed;
+enum ENUM_CMD_TEST_CTRL_ACT {
+ CMD_TEST_CTRL_ACT_SWITCH_MODE = 0,
+ CMD_TEST_CTRL_ACT_SET_AT = 1,
+ CMD_TEST_CTRL_ACT_GET_AT = 2,
+ CMD_TEST_CTRL_ACT_SET_AT_ENG = 3,
+ CMD_TEST_CTRL_ACT_GET_AT_ENG = 4,
+ CMD_TEST_CTRL_ACT_NUM
+};
+
+enum ENUM_CMD_TEST_CTRL_ACT_SWITCH_MODE_OP {
+ CMD_TEST_CTRL_ACT_SWITCH_MODE_NORMAL = 0,
+ CMD_TEST_CTRL_ACT_SWITCH_MODE_RF_TEST = 1,
+ CMD_TEST_CTRL_ACT_SWITCH_MODE_ICAP = 2,
+ CMD_TEST_CTRL_ACT_SWITCH_MODE_NUM
+};
+
+union testmode_data {
+ __le32 op_mode;
+ __le32 channel_freq;
+ u8 rf_at_info[84];
+};
+
+union testmode_evt {
+ __le32 op_mode;
+ __le32 channel_freq;
+ u8 rf_at_info[1024];
+};
+
+struct uni_cmd_testmode_ctrl {
+ u16 tag;
+ u16 length;
+ u8 action;
+ u8 reserved[3];
+ union testmode_data data;
+} __packed;
+
+struct mt7925_rftest_cmd {
+ u8 padding[4];
+ struct uni_cmd_testmode_ctrl ctrl;
+} __packed;
+
static inline enum connac3_mcu_cipher_type
mt7925_mcu_get_cipher(int cipher)
{
@@ -637,11 +687,15 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
int mt7925_mcu_set_timing(struct mt792x_phy *phy,
struct ieee80211_bss_conf *link_conf);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
+int mt7925_mcu_set_thermal_protect(struct mt792x_dev *dev);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif_link *mvif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
+int mt7925_mcu_set_eht_pp(struct mt76_phy *phy, struct mt76_vif_link *mvif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx);
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
struct ieee80211_bss_conf *link_conf);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 4e50f2597ccd..1b165d0d8bd3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -365,5 +365,11 @@ int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
int link_id);
+int mt7925_mcu_wf_rf_pin_ctrl(struct mt792x_phy *phy);
+
+int mt7925_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+int mt7925_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ struct netlink_callback *cb, void *data, int len);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index c7b5dc1dbb34..89dc30f7c6b7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -31,6 +31,10 @@ static void mt7925e_unregister_device(struct mt792x_dev *dev)
{
int i;
struct mt76_connac_pm *pm = &dev->pm;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ if (dev->phy.chip_cap & MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN)
+ wiphy_rfkill_stop_polling(hw->wiphy);
cancel_work_sync(&dev->init_work);
mt76_unregister_device(&dev->mt76);
@@ -490,9 +494,6 @@ static int mt7925_pci_suspend(struct device *device)
/* disable interrupt */
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
- mt76_wr(dev, MT_WFDMA0_HOST_INT_DIS,
- dev->irq_map->tx.all_complete_mask |
- MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
index 985794a40c1a..547489092c29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h
@@ -28,7 +28,7 @@
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
-#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x228)
+#define MT_WFDMA0_HOST_INT_ENA MT_WFDMA0(0x204)
#define MT_WFDMA0_HOST_INT_DIS MT_WFDMA0(0x22c)
#define HOST_RX_DONE_INT_ENA4 BIT(12)
#define HOST_RX_DONE_INT_ENA5 BIT(13)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c
new file mode 100644
index 000000000000..a3c97164ba21
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/testmode.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7925.h"
+#include "mcu.h"
+
+#define MT7925_EVT_RSP_LEN 512
+
+enum mt7925_testmode_attr {
+ MT7925_TM_ATTR_UNSPEC,
+ MT7925_TM_ATTR_SET,
+ MT7925_TM_ATTR_QUERY,
+ MT7925_TM_ATTR_RSP,
+
+ /* keep last */
+ NUM_MT7925_TM_ATTRS,
+ MT7925_TM_ATTR_MAX = NUM_MT7925_TM_ATTRS - 1,
+};
+
+struct mt7925_tm_cmd {
+ u8 padding[4];
+ struct uni_cmd_testmode_ctrl c;
+} __packed;
+
+struct mt7925_tm_evt {
+ u32 param0;
+ u32 param1;
+} __packed;
+
+static const struct nla_policy mt7925_tm_policy[NUM_MT7925_TM_ATTRS] = {
+ [MT7925_TM_ATTR_SET] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7925_tm_cmd)),
+ [MT7925_TM_ATTR_QUERY] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7925_tm_cmd)),
+};
+
+static int
+mt7925_tm_set(struct mt792x_dev *dev, struct mt7925_tm_cmd *req)
+{
+ struct mt7925_rftest_cmd cmd;
+ struct mt7925_rftest_cmd *pcmd = &cmd;
+ bool testmode = false, normal = false;
+ struct mt76_connac_pm *pm = &dev->pm;
+ struct mt76_phy *phy = &dev->mphy;
+ int ret = -ENOTCONN;
+
+ memset(pcmd, 0, sizeof(*pcmd));
+ memcpy(pcmd, req, sizeof(struct mt7925_tm_cmd));
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (pcmd->ctrl.action == CMD_TEST_CTRL_ACT_SWITCH_MODE) {
+ if (pcmd->ctrl.data.op_mode == CMD_TEST_CTRL_ACT_SWITCH_MODE_NORMAL)
+ normal = true;
+ else
+ testmode = true;
+ }
+
+ if (testmode) {
+ /* Make sure testmode running on full power mode */
+ pm->enable = false;
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
+ __mt792x_mcu_drv_pmctrl(dev);
+
+ phy->test.state = MT76_TM_STATE_ON;
+ }
+
+ if (!mt76_testmode_enabled(phy))
+ goto out;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(TESTMODE_CTRL), &cmd,
+ sizeof(cmd), false);
+
+ if (ret)
+ goto out;
+
+ if (normal) {
+ /* Switch back to the normal world */
+ phy->test.state = MT76_TM_STATE_OFF;
+ pm->enable = true;
+ }
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7925_tm_query(struct mt792x_dev *dev, struct mt7925_tm_cmd *req,
+ char *evt_resp)
+{
+ struct mt7925_rftest_cmd cmd;
+ char *pcmd = (char *)&cmd;
+ struct sk_buff *skb = NULL;
+ int ret = 1;
+
+ memset(pcmd, 0, sizeof(*pcmd));
+ memcpy(pcmd + 4, (char *)&req->c, sizeof(struct uni_cmd_testmode_ctrl));
+
+ if (*((uint16_t *)req->padding) == MCU_UNI_CMD_TESTMODE_CTRL)
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_QUERY(TESTMODE_CTRL),
+ &cmd, sizeof(cmd), true, &skb);
+ else if (*((uint16_t *)req->padding) == MCU_UNI_CMD_TESTMODE_RX_STAT)
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_UNI_QUERY(TESTMODE_RX_STAT),
+ &cmd, sizeof(cmd), true, &skb);
+
+ if (ret)
+ goto out;
+
+ memcpy((char *)evt_resp, (char *)skb->data + 8, MT7925_EVT_RSP_LEN);
+
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+int mt7925_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct nlattr *tb[NUM_MT76_TM_ATTRS];
+ struct mt76_phy *mphy = hw->priv;
+ struct mt792x_phy *phy = mphy->priv;
+ int err;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state) ||
+ !(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ return -ENOTCONN;
+
+ err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+ mt76_tm_policy, NULL);
+ if (err)
+ return err;
+
+ if (tb[MT76_TM_ATTR_DRV_DATA]) {
+ struct nlattr *drv_tb[NUM_MT7925_TM_ATTRS], *data;
+ int ret;
+
+ data = tb[MT76_TM_ATTR_DRV_DATA];
+ ret = nla_parse_nested_deprecated(drv_tb,
+ MT7925_TM_ATTR_MAX,
+ data, mt7925_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ data = drv_tb[MT7925_TM_ATTR_SET];
+ if (data)
+ return mt7925_tm_set(phy->dev, nla_data(data));
+ }
+
+ return -EINVAL;
+}
+
+int mt7925_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ struct netlink_callback *cb, void *data, int len)
+{
+ struct nlattr *tb[NUM_MT76_TM_ATTRS];
+ struct mt76_phy *mphy = hw->priv;
+ struct mt792x_phy *phy = mphy->priv;
+ int err;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state) ||
+ !(hw->conf.flags & IEEE80211_CONF_MONITOR) ||
+ !mt76_testmode_enabled(mphy))
+ return -ENOTCONN;
+
+ if (cb->args[2]++ > 0)
+ return -ENOENT;
+
+ err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+ mt76_tm_policy, NULL);
+ if (err)
+ return err;
+
+ if (tb[MT76_TM_ATTR_DRV_DATA]) {
+ struct nlattr *drv_tb[NUM_MT7925_TM_ATTRS], *data;
+ int ret;
+
+ data = tb[MT76_TM_ATTR_DRV_DATA];
+ ret = nla_parse_nested_deprecated(drv_tb,
+ MT7925_TM_ATTR_MAX,
+ data, mt7925_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ data = drv_tb[MT7925_TM_ATTR_QUERY];
+ if (data) {
+ char evt_resp[MT7925_EVT_RSP_LEN];
+
+ err = mt7925_tm_query(phy->dev, nla_data(data),
+ evt_resp);
+ if (err)
+ return err;
+
+ return nla_put(msg, MT7925_TM_ATTR_RSP,
+ sizeof(evt_resp), evt_resp);
+ }
+ }
+
+ return -EINVAL;
+}
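
Userspace reaches the new testmode file through the standard nl80211 testmode command, with the driver-specific blob nested under MT76_TM_ATTR_DRV_DATA as MT7925_TM_ATTR_SET or MT7925_TM_ATTR_QUERY. A standalone sketch of building the blob itself; note how mt7925_tm_query() above reads the target MCU command id out of the first two padding bytes, so a caller presumably stashes it there (the command id value below is a placeholder, not the real one):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MCU_UNI_CMD_TESTMODE_CTRL 0x46		/* placeholder value */
#define CMD_TEST_CTRL_ACT_GET_AT 2

struct testmode_ctrl {
	uint16_t tag;
	uint16_t length;
	uint8_t action;
	uint8_t reserved[3];
	uint8_t data[84];
} __attribute__((packed));

struct tm_cmd {
	uint8_t padding[4];
	struct testmode_ctrl c;
} __attribute__((packed));

int main(void)
{
	struct tm_cmd cmd;
	uint16_t id = MCU_UNI_CMD_TESTMODE_CTRL;

	memset(&cmd, 0, sizeof(cmd));
	memcpy(cmd.padding, &id, sizeof(id));	/* routes the query above */
	cmd.c.action = CMD_TEST_CTRL_ACT_GET_AT;
	printf("blob size: %zu bytes\n", sizeof(cmd));
	return 0;
}
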
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c b/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c
index ccab0d7b9be4..303d6e80a666 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/coredump.c
@@ -48,8 +48,8 @@ const struct mt7996_mem_region*
mt7996_coredump_get_mem_layout(struct mt7996_dev *dev, u32 *num)
{
switch (mt76_chip(&dev->mt76)) {
- case 0x7990:
- case 0x7991:
+ case MT7996_DEVICE_ID:
+ case MT7996_DEVICE_ID_2:
*num = ARRAY_SIZE(mt7996_mem_regions);
return &mt7996_mem_regions[0];
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 4a28db17a287..0ab827f52fd7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -222,18 +222,27 @@ static const struct file_operations mt7996_sys_recovery_ops = {
static int
mt7996_radar_trigger(void *data, u64 val)
{
+#define RADAR_MAIN_CHAIN 1
+#define RADAR_BACKGROUND 2
struct mt7996_dev *dev = data;
+ struct mt7996_phy *phy = mt7996_band_phy(dev, NL80211_BAND_5GHZ);
+ int rdd_idx;
- if (val > MT_RX_SEL2)
+ if (!phy || !val || val > RADAR_BACKGROUND)
return -EINVAL;
- if (val == MT_RX_SEL2 && !dev->rdd2_phy) {
+ if (val == RADAR_BACKGROUND && !dev->rdd2_phy) {
dev_err(dev->mt76.dev, "Background radar is not enabled\n");
return -EINVAL;
}
- return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE,
- val, 0, 0);
+ rdd_idx = mt7996_get_rdd_idx(phy, val == RADAR_BACKGROUND);
+ if (rdd_idx < 0) {
+ dev_err(dev->mt76.dev, "No RDD found\n");
+ return -EINVAL;
+ }
+
+ return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, rdd_idx, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
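
The debugfs trigger now resolves a single RDD index instead of passing (band, rx_sel) pairs down to the MCU. mt7996_get_rdd_idx() itself is outside this diff, so the sketch below is only an inference from the MT_RDD_IDX_* cases handled in the mcu.c hunks further down:

#include <stdio.h>

enum { MT_RDD_IDX_BAND2, MT_RDD_IDX_BAND1, MT_RDD_IDX_BACKGROUND };

/* hedged guess at the mapping; the real logic lives outside this diff */
static int get_rdd_idx_sketch(int band_idx, int background)
{
	if (background)
		return MT_RDD_IDX_BACKGROUND;
	switch (band_idx) {
	case 1:
		return MT_RDD_IDX_BAND1;
	case 2:
		return MT_RDD_IDX_BAND2;
	default:
		return -1;	/* e.g. a 2 GHz-only band has no radar detector */
	}
}

int main(void)
{
	printf("band1 -> %d\n", get_rdd_idx_sketch(1, 0));
	printf("background -> %d\n", get_rdd_idx_sketch(0, 1));
	printf("band0 -> %d\n", get_rdd_idx_sketch(0, 0));
	return 0;
}
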
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 69a7d9b2e38b..c8bef0b2a144 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -55,20 +55,32 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
/* rx queue */
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
+ /* for mt7990, RX ring 1 is for SDO instead */
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
-
- /* mt7996: band0 and band1, mt7992: band0 */
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
- RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);
+ if (mt7996_has_wa(dev))
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN,
+ MT7996_RXQ_MCU_WA_MAIN);
- if (is_mt7996(&dev->mt76)) {
+ switch (mt76_chip(&dev->mt76)) {
+ case MT7992_DEVICE_ID:
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
+ break;
+ case MT7990_DEVICE_ID:
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
+ MT_INT_RX_TXFREE_BAND0_MT7990, MT7990_RXQ_TXFREE0);
+ if (dev->hif2)
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND1, WFDMA0,
+ MT_INT_RX_TXFREE_BAND1_MT7990, MT7990_RXQ_TXFREE1);
+ break;
+ case MT7996_DEVICE_ID:
+ default:
/* mt7996 band2 */
- RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
- } else {
- /* mt7992 band1 */
- RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
- RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
+ break;
}
if (dev->has_rro) {
@@ -104,9 +116,11 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
}
/* mcu tx queue */
- MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
- MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
+ if (mt7996_has_wa(dev))
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA,
+ MT7996_TXQ_MCU_WA);
}
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
@@ -121,43 +135,62 @@ static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
u16 base = 0;
- u8 queue;
+ u8 queue, val;
#define PREFETCH(_depth) (__mt7996_dma_prefetch_base(&base, (_depth)))
/* prefetch SRAM wrapping boundary for tx/rx ring. */
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2));
+ /* Tx Command Rings */
+ val = is_mt7996(&dev->mt76) ? 2 : 4;
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(val));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(val));
+ if (mt7996_has_wa(dev))
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(val));
+
+ /* Tx Data Rings */
mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2));
-
- queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2));
-
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
+ if (!is_mt7996(&dev->mt76) || dev->hif2)
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
+ if (is_mt7996(&dev->mt76))
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
+
+ /* Rx Event Rings */
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(val));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(val));
+
+ /* Rx TxFreeDone From WA Rings */
+ if (mt7996_has_wa(dev)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(val));
+ queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(val));
+ }
+ /* Rx TxFreeDone From MAC Rings */
+ val = is_mt7996(&dev->mt76) ? 4 : 8;
+ if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro))
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
+ if (is_mt7990(&dev->mt76) && dev->hif2)
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
+ else if (is_mt7996(&dev->mt76) && dev->has_rro)
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));
+
+ /* Rx Data Rings */
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
+ /* Rx RRO Rings */
if (dev->has_rro) {
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
- PREFETCH(0x10));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
- PREFETCH(0x10));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
- PREFETCH(0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
- PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
+ queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
+
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, PREFETCH(val));
+ if (is_mt7996(&dev->mt76)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
+ PREFETCH(val));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
+ PREFETCH(val));
+ }
}
#undef PREFETCH
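
The PREFETCH() helper above is an accumulator: each call reserves `depth` units of ring SRAM starting at a running base and returns a register value packing both. The bit layout in this sketch is an assumption; only the running-base idea is visible in the diff:

#include <stdint.h>
#include <stdio.h>

static uint32_t prefetch_base(uint16_t *base, uint8_t depth)
{
	uint32_t val = *base | ((uint32_t)depth << 16);	/* packing assumed */

	*base += (uint16_t)depth << 4;	/* advance in 0x10-sized units (assumed) */
	return val;
}

int main(void)
{
	uint16_t base = 0;

	printf("FWDL ring: 0x%08x\n", prefetch_base(&base, 0x2));
	printf("WM ring:   0x%08x\n", prefetch_base(&base, 0x2));
	printf("TX ring 0: 0x%08x\n", prefetch_base(&base, 0x8));
	return 0;
}
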
@@ -269,6 +302,9 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
mtk_wed_device_start(wed, wed_irq_mask);
}
+ if (!mt7996_has_wa(dev))
+ irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) |
+ MT_INT_RX(MT_RXQ_BAND1_WA));
irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
mt7996_irq_enable(dev, irq_mask);
@@ -474,12 +510,14 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* command to WA */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
- MT_MCUQ_ID(MT_MCUQ_WA),
- MT7996_TX_MCU_RING_SIZE,
- MT_MCUQ_RING_BASE(MT_MCUQ_WA));
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7996_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
+ if (ret)
+ return ret;
+ }
/* firmware download */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
@@ -493,16 +531,16 @@ int mt7996_dma_init(struct mt7996_dev *dev)
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
MT_RXQ_ID(MT_RXQ_MCU),
MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
+ MT7996_RX_MCU_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
- /* event from WA */
+ /* event from WA, or SDO event for mt7990 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT_RXQ_ID(MT_RXQ_MCU_WA),
MT7996_RX_MCU_RING_SIZE_WA,
- MT_RX_BUF_SIZE,
+ MT7996_RX_MCU_BUF_SIZE,
MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
return ret;
@@ -527,13 +565,41 @@ int mt7996_dma_init(struct mt7996_dev *dev)
dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
}
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
- MT_RXQ_ID(MT_RXQ_MAIN_WA),
- MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ if (ret)
+ return ret;
+ } else {
+ if (mtk_wed_device_active(wed)) {
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+ }
+
+ if (!mt7996_has_wa(dev) && dev->hif2) {
+ if (mtk_wed_device_active(wed)) {
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed;
+ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND1),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1));
+ if (ret)
+ return ret;
+ }
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx data queue for mt7996 band2 */
@@ -573,14 +639,16 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for mt7992 band1 */
- rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
- MT_RXQ_ID(MT_RXQ_BAND1_WA),
- MT7996_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE,
- rx_base);
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
+ MT_RXQ_ID(MT_RXQ_BAND1_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ rx_base);
+ if (ret)
+ return ret;
+ }
}
if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index 53dfac02f8af..87c6192b6384 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -13,10 +13,12 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev)
u16 val = get_unaligned_le16(eeprom);
switch (val) {
- case 0x7990:
+ case MT7996_DEVICE_ID:
return is_mt7996(&dev->mt76) ? 0 : -EINVAL;
- case 0x7992:
+ case MT7992_DEVICE_ID:
return is_mt7992(&dev->mt76) ? 0 : -EINVAL;
+ case MT7990_DEVICE_ID:
+ return is_mt7990(&dev->mt76) ? 0 : -EINVAL;
default:
return -EINVAL;
}
@@ -25,7 +27,7 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev)
static char *mt7996_eeprom_name(struct mt7996_dev *dev)
{
switch (mt76_chip(&dev->mt76)) {
- case 0x7992:
+ case MT7992_DEVICE_ID:
switch (dev->var.type) {
case MT7992_VAR_TYPE_23:
if (dev->var.fem == MT7996_FEM_INT)
@@ -39,7 +41,11 @@ static char *mt7996_eeprom_name(struct mt7996_dev *dev)
return MT7992_EEPROM_DEFAULT_MIX;
return MT7992_EEPROM_DEFAULT;
}
- case 0x7990:
+ case MT7990_DEVICE_ID:
+ if (dev->var.fem == MT7996_FEM_INT)
+ return MT7990_EEPROM_DEFAULT_INT;
+ return MT7990_EEPROM_DEFAULT;
+ case MT7996_DEVICE_ID:
default:
switch (dev->var.type) {
case MT7996_VAR_TYPE_233:
@@ -304,6 +310,7 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
phy->has_aux_rx = true;
mphy->antenna_mask = BIT(nss) - 1;
+ phy->orig_antenna_mask = mphy->antenna_mask;
mphy->chainmask = (BIT(path) - 1) << dev->chainshift[band_idx];
phy->orig_chainmask = mphy->chainmask;
dev->chainmask |= mphy->chainmask;
@@ -370,3 +377,30 @@ s8 mt7996_eeprom_get_power_delta(struct mt7996_dev *dev, int band)
return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta;
}
+
+bool mt7996_eeprom_has_background_radar(struct mt7996_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case MT7996_DEVICE_ID:
+ if (dev->var.type == MT7996_VAR_TYPE_233)
+ return false;
+ break;
+ case MT7992_DEVICE_ID:
+ if (dev->var.type == MT7992_VAR_TYPE_23)
+ return false;
+ break;
+ case MT7990_DEVICE_ID: {
+ u8 path, rx_path, nss, *eeprom = dev->mt76.eeprom.data;
+
+ mt7996_eeprom_parse_stream(eeprom, MT_BAND1, &path, &rx_path, &nss);
+ /* Disable background radar capability in 3T3R */
+ if (path == 3 || rx_path == 3)
+ return false;
+ break;
+ }
+ default:
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 6b660424aedc..a9599c286328 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -217,6 +217,9 @@ static int mt7996_thermal_init(struct mt7996_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7996_%s.%d",
wiphy_name(wiphy), phy->mt76->band_idx);
+ if (!name)
+ return -ENOMEM;
+
snprintf(cname, sizeof(cname), "cooling_device%d", phy->mt76->band_idx);
cdev = thermal_cooling_device_register(name, phy, &mt7996_thermal_ops);
@@ -314,8 +317,8 @@ static void __mt7996_init_txpower(struct mt7996_phy *phy,
struct ieee80211_supported_band *sband)
{
struct mt7996_dev *dev = phy->dev;
- int i, nss = hweight16(phy->mt76->chainmask);
- int nss_delta = mt76_tx_power_nss_delta(nss);
+ int i, n_chains = hweight16(phy->mt76->chainmask);
+ int path_delta = mt76_tx_power_path_delta(n_chains);
int pwr_delta = mt7996_eeprom_get_power_delta(dev, sband->band);
struct mt76_power_limits limits;
@@ -327,7 +330,7 @@ static void __mt7996_init_txpower(struct mt7996_phy *phy,
target_power = mt76_get_rate_power_limits(phy->mt76, chan,
&limits,
target_power);
- target_power += nss_delta;
+ target_power += path_delta;
target_power = DIV_ROUND_UP(target_power, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
@@ -440,6 +443,9 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
hw->queues = 4;
hw->max_rx_aggregation_subframes = max_subframes;
hw->max_tx_aggregation_subframes = max_subframes;
+ if (is_mt7990(mdev) && dev->has_eht)
+ hw->max_tx_aggregation_subframes = 512;
+
hw->netdev_features = NETIF_F_RXCSUM;
if (mtk_wed_device_active(wed))
hw->netdev_features |= NETIF_F_HW_TC;
@@ -472,7 +478,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
- if (mt7996_has_background_radar(dev) &&
+ if (mt7996_eeprom_has_background_radar(dev) &&
(!mdev->dev->of_node ||
!of_property_read_bool(mdev->dev->of_node,
"mediatek,disable-radar-background")))
@@ -929,13 +935,13 @@ static int mt7996_variant_type_init(struct mt7996_dev *dev)
u8 var_type;
switch (mt76_chip(&dev->mt76)) {
- case 0x7990:
+ case MT7996_DEVICE_ID:
if (val & MT_PAD_GPIO_2ADIE_TBTC)
var_type = MT7996_VAR_TYPE_233;
else
var_type = MT7996_VAR_TYPE_444;
break;
- case 0x7992:
+ case MT7992_DEVICE_ID:
if (val & MT_PAD_GPIO_ADIE_SINGLE)
var_type = MT7992_VAR_TYPE_23;
else if (u32_get_bits(val, MT_PAD_GPIO_ADIE_COMB_7992))
@@ -943,6 +949,9 @@ static int mt7996_variant_type_init(struct mt7996_dev *dev)
else
return -EINVAL;
break;
+ case MT7990_DEVICE_ID:
+ var_type = MT7990_VAR_TYPE_23;
+ break;
default:
return -EINVAL;
}
@@ -1061,10 +1070,10 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
- if (is_mt7996(phy->mt76->dev))
- *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3);
- else
+ if (is_mt7992(phy->mt76->dev))
*cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 4);
+ else
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
@@ -1107,18 +1116,17 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
- IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
elem->phy_cap_info[2] |= c;
c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
- if (is_mt7996(phy->mt76->dev))
- c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 * non_2g);
- else
+ if (is_mt7992(phy->mt76->dev))
c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 |
(IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 * non_2g);
+ else
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ (IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 * non_2g);
elem->phy_cap_info[4] |= c;
@@ -1318,6 +1326,9 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ eht_cap_elem->mac_cap_info[1] |=
+ IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK;
+
eht_cap_elem->phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index d89c06f47997..0dbd4662bc84 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -647,6 +647,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
}
+ /* IEEE 802.11 fragmentation can only be applied to unicast frames.
+ * Hence, drop fragments with multicast/broadcast RA.
+ * This check fixes vulnerabilities, like CVE-2020-26145.
+ */
+ if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
+ FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
+ return -EINVAL;
+
hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
if (hdr_trans && ieee80211_has_morefrags(fc)) {
if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
@@ -789,10 +797,13 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
if (ieee80211_is_action(fc) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK &&
- mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ if (is_mt7990(&dev->mt76))
+ txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
tid = MT_TX_ADDBA;
- else if (ieee80211_is_mgmt(hdr->frame_control))
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
tid = MT_TX_NORMAL;
+ }
val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
FIELD_PREP(MT_TXD1_HDR_INFO,
@@ -987,12 +998,32 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
}
}
+static bool
+mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return true;
+
+	/* let SDO bypass specific data frames */
+ if (!mt7996_has_wa(dev)) {
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return true;
+
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ !ieee80211_is_data_present(hdr->frame_control))
+ return true;
+ }
+
+ return false;
+}
+
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
@@ -1017,8 +1048,11 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return id;
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
- mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
- pid, qid, 0);
+ memset(txwi_ptr, 0, MT_TXD_SIZE);
+	/* Non-QoS data keeps its 802.11 header, so the host needs to fill in the TXD */
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
+ pid, qid, 0);
txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
@@ -1035,13 +1069,15 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
txp->fw.nbuf = nbuf;
- txp->fw.flags =
- cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);
+ txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
if (!key)
txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
- if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
+ if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
if (vif) {
@@ -1179,6 +1215,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
void *end = data + len;
bool wake = false;
u16 total, count = 0;
+ u8 ver;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
@@ -1192,7 +1229,8 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
}
- if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5))
+ ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
+ if (WARN_ON_ONCE(ver < 5))
return;
total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
@@ -1214,11 +1252,16 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
if (!sta)
- continue;
+ goto next;
msta_link = container_of(wcid, struct mt7996_sta_link,
wcid);
mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
+next:
+ /* ver 7 has a new DW with pair = 1, skip it */
+ if (ver == 7 && ((void *)(cur_info + 1) < end) &&
+ (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
+ cur_info++;
continue;
} else if (info & MT_TXFREE_INFO_HEADER) {
u32 tx_retries = 0, tx_failed = 0;
@@ -2411,16 +2454,15 @@ void mt7996_mac_work(struct work_struct *work)
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
+ int rdd_idx = mt7996_get_rdd_idx(phy, false);
+
+ if (rdd_idx < 0)
+ return;
- if (phy->rdd_state & BIT(0))
- mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
- MT_RX_SEL0, 0);
- if (phy->rdd_state & BIT(1))
- mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
- MT_RX_SEL0, 0);
+ mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}
-static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
+static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
int err, region;
@@ -2437,44 +2479,30 @@ static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
break;
}
- err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
- MT_RX_SEL0, region);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
if (err < 0)
return err;
- return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7996_dev *dev = phy->dev;
- u8 band_idx = phy->mt76->band_idx;
- int err;
+ int err, rdd_idx;
- /* start CAC */
- err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
- MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ rdd_idx = mt7996_get_rdd_idx(phy, false);
+ if (rdd_idx < 0)
+ return -EINVAL;
- err = mt7996_dfs_start_rdd(dev, band_idx);
+ /* start CAC */
+ err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
if (err < 0)
return err;
- phy->rdd_state |= BIT(band_idx);
-
- if (chandef->width == NL80211_CHAN_WIDTH_160 ||
- chandef->width == NL80211_CHAN_WIDTH_80P80) {
- err = mt7996_dfs_start_rdd(dev, 1);
- if (err < 0)
- return err;
-
- phy->rdd_state |= BIT(1);
- }
+ err = mt7996_dfs_start_rdd(dev, rdd_idx);
- return 0;
+ return err;
}
static int
@@ -2515,12 +2543,12 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
enum mt76_dfs_state dfs_state, prev_state;
- int err;
+ int err, rdd_idx = mt7996_get_rdd_idx(phy, false);
prev_state = phy->mt76->dfs_state;
dfs_state = mt76_phy_dfs_state(phy->mt76);
- if (prev_state == dfs_state)
+ if (prev_state == dfs_state || rdd_idx < 0)
return 0;
if (prev_state == MT_DFS_STATE_UNKNOWN)
@@ -2544,8 +2572,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
if (dfs_state == MT_DFS_STATE_CAC)
return 0;
- err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
if (err < 0) {
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
return err;
@@ -2555,8 +2582,7 @@ int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
return 0;
stop:
- err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
- phy->mt76->band_idx, MT_RX_SEL0, 0);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
if (err < 0)
return err;
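
Of the mac.c changes, the new fragment check is the most self-contained: 802.11 fragmentation is defined only for unicast receiver addresses, so any fragment arriving with a group-addressed RA can be dropped outright (the bug class behind CVE-2020-26145, as the comment in the hunk notes). A standalone sketch of the predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool drop_fragment(bool more_frags, uint16_t frag_num, bool unicast_ra)
{
	/* a fragment has "more fragments" set or a non-zero fragment number */
	return (more_frags || frag_num) && !unicast_ra;
}

int main(void)
{
	printf("mcast fragment: drop=%d\n", drop_fragment(true, 0, false));
	printf("ucast fragment: drop=%d\n", drop_fragment(true, 0, true));
	printf("mcast whole frame: drop=%d\n", drop_fragment(false, 0, false));
	return 0;
}
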
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 91c64e3a0860..78ae9f5cb176 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -68,11 +68,13 @@ static int mt7996_start(struct ieee80211_hw *hw)
static void mt7996_stop_phy(struct mt7996_phy *phy)
{
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev;
if (!phy || !test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
+ dev = phy->dev;
+
cancel_delayed_work_sync(&phy->mt76->mac_work);
mutex_lock(&dev->mt76.mutex);
@@ -414,11 +416,13 @@ static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy)
static void mt7996_set_monitor(struct mt7996_phy *phy, bool enabled)
{
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_dev *dev;
if (!phy)
return;
+ dev = phy->dev;
+
if (enabled == !(phy->rxfilter & MT_WF_RFCR_DROP_OTHER_UC))
return;
@@ -684,7 +688,7 @@ mt7996_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
n_chains = hweight16(phy->mt76->chainmask);
- delta = mt76_tx_power_nss_delta(n_chains);
+ delta = mt76_tx_power_path_delta(n_chains);
*dbm = DIV_ROUND_UP(phy->mt76->txpower_cur + delta, 2);
return 0;
@@ -987,7 +991,7 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
unsigned int link_id;
- int err;
+ int err = 0;
for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf;
@@ -998,16 +1002,22 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
continue;
link_conf = link_conf_dereference_protected(vif, link_id);
- if (!link_conf)
+ if (!link_conf) {
+ err = -EINVAL;
goto error_unlink;
+ }
link = mt7996_vif_link(dev, vif, link_id);
- if (!link)
+ if (!link) {
+ err = -EINVAL;
goto error_unlink;
+ }
link_sta = link_sta_dereference_protected(sta, link_id);
- if (!link_sta)
+ if (!link_sta) {
+ err = -EINVAL;
goto error_unlink;
+ }
err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
link_id);
@@ -1244,7 +1254,7 @@ unlock:
static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- int i, ret;
+ int i, ret = 0;
mutex_lock(&dev->mt76.mutex);
@@ -1518,7 +1528,8 @@ mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
u8 shift = dev->chainshift[band_idx];
phy->mt76->chainmask = tx_ant & phy->orig_chainmask;
- phy->mt76->antenna_mask = phy->mt76->chainmask >> shift;
+ phy->mt76->antenna_mask = (phy->mt76->chainmask >> shift) &
+ phy->orig_antenna_mask;
mt76_set_stream_caps(phy->mt76, true);
mt7996_set_stream_vht_txbf_caps(phy);
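
Two of the main.c hunks above fix the same latent bug: the old code dereferenced `phy` in an initializer, which runs before the later `if (!phy)` check can protect anything. A standalone sketch of the pattern and its fix:

#include <stddef.h>
#include <stdio.h>

struct phy { int dev; };

static int broken(struct phy *phy)
{
	int dev = phy->dev;	/* faults here if phy == NULL */

	if (!phy)
		return -1;	/* too late to help */
	return dev;
}

static int fixed(struct phy *phy)
{
	int dev;

	if (!phy)
		return -1;
	dev = phy->dev;		/* dereference only after the check */
	return dev;
}

int main(void)
{
	struct phy p = { .dev = 42 };

	printf("fixed(&p) = %d\n", fixed(&p));
	printf("fixed(NULL) = %d\n", fixed(NULL));
	(void)broken;		/* calling broken(NULL) would fault */
	return 0;
}
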
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index ddd555942c73..f0adc0b4b8b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -13,7 +13,7 @@
#define fw_name(_dev, name, ...) ({ \
char *_fw; \
switch (mt76_chip(&(_dev)->mt76)) { \
- case 0x7992: \
+ case MT7992_DEVICE_ID: \
switch ((_dev)->var.type) { \
case MT7992_VAR_TYPE_23: \
_fw = MT7992_##name##_23; \
@@ -22,7 +22,10 @@
_fw = MT7992_##name; \
} \
break; \
- case 0x7990: \
+ case MT7990_DEVICE_ID: \
+ _fw = MT7990_##name; \
+ break; \
+ case MT7996_DEVICE_ID: \
default: \
switch ((_dev)->var.type) { \
case MT7996_VAR_TYPE_233: \
@@ -265,7 +268,7 @@ mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
txd_len = cmd & __MCU_CMD_FIELD_UNI ? sizeof(*uni_txd) : sizeof(*mcu_txd);
txd = (__le32 *)skb_push(skb, txd_len);
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state) && mt7996_has_wa(dev))
qid = MT_MCUQ_WA;
else
qid = MT_MCUQ_WM;
@@ -335,8 +338,12 @@ exit:
int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
{
struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
__le32 args[3];
- } req = {
+ } __packed req = {
.args = {
cpu_to_le32(a1),
cpu_to_le32(a2),
@@ -344,7 +351,16 @@ int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
},
};
- return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false);
+ if (mt7996_has_wa(dev))
+ return mt76_mcu_send_msg(&dev->mt76, cmd, &req.args,
+ sizeof(req.args), false);
+
+ req.tag = cpu_to_le16(cmd == MCU_WA_PARAM_CMD(QUERY) ? UNI_CMD_SDO_QUERY :
+ UNI_CMD_SDO_SET);
+ req.len = cpu_to_le16(sizeof(req) - 4);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_UNI_CMD(SDO), &req,
+ sizeof(req), false);
}
static void
@@ -364,21 +380,27 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
r = (struct mt7996_mcu_rdd_report *)skb->data;
- if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
- return;
-
- if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy)
- return;
-
- if (r->band_idx == MT_RX_SEL2)
+ switch (r->rdd_idx) {
+ case MT_RDD_IDX_BAND2:
+ mphy = dev->mt76.phys[MT_BAND2];
+ break;
+ case MT_RDD_IDX_BAND1:
+ mphy = dev->mt76.phys[MT_BAND1];
+ break;
+ case MT_RDD_IDX_BACKGROUND:
+ if (!dev->rdd2_phy)
+ return;
mphy = dev->rdd2_phy->mt76;
- else
- mphy = dev->mt76.phys[r->band_idx];
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Unknown RDD idx %d\n", r->rdd_idx);
+ return;
+ }
if (!mphy)
return;
- if (r->band_idx == MT_RX_SEL2)
+ if (r->rdd_idx == MT_RDD_IDX_BACKGROUND)
cfg80211_background_radar_event(mphy->hw->wiphy,
&dev->rdd2_chandef,
GFP_ATOMIC);
@@ -2242,9 +2264,6 @@ mt7996_mcu_sta_mld_setup_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
if (!link)
continue;
- if (!msta_link)
- continue;
-
mld_setup_link->wcid = cpu_to_le16(msta_link->wcid.idx);
mld_setup_link->bss_idx = link->mt76.idx;
mld_setup_link++;
@@ -3011,6 +3030,9 @@ static int mt7996_load_ram(struct mt7996_dev *dev)
if (ret)
return ret;
+ if (!mt7996_has_wa(dev))
+ return 0;
+
ret = __mt7996_load_ram(dev, "DSP", fw_name(dev, FIRMWARE_DSP),
MT7996_RAM_TYPE_DSP);
if (ret)
@@ -3021,10 +3043,9 @@ static int mt7996_load_ram(struct mt7996_dev *dev)
}
static int
-mt7996_firmware_state(struct mt7996_dev *dev, bool wa)
+mt7996_firmware_state(struct mt7996_dev *dev, u8 fw_state)
{
- u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE,
- wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD);
+ u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE, fw_state);
if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
state, 1000)) {
@@ -3056,13 +3077,14 @@ mt7996_mcu_restart(struct mt76_dev *dev)
static int mt7996_load_firmware(struct mt7996_dev *dev)
{
+ u8 fw_state;
int ret;
/* make sure fw is download state */
- if (mt7996_firmware_state(dev, false)) {
+ if (mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD)) {
/* restart firmware once */
mt7996_mcu_restart(&dev->mt76);
- ret = mt7996_firmware_state(dev, false);
+ ret = mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD);
if (ret) {
dev_err(dev->mt76.dev,
"Firmware is not ready for download\n");
@@ -3078,7 +3100,8 @@ static int mt7996_load_firmware(struct mt7996_dev *dev)
if (ret)
return ret;
- ret = mt7996_firmware_state(dev, true);
+ fw_state = mt7996_has_wa(dev) ? FW_STATE_RDY : FW_STATE_NORMAL_TRX;
+ ret = mt7996_firmware_state(dev, fw_state);
if (ret)
return ret;
@@ -3216,13 +3239,15 @@ int mt7996_mcu_init_firmware(struct mt7996_dev *dev)
if (ret)
return ret;
- ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0);
- if (ret)
- return ret;
+ if (mt7996_has_wa(dev)) {
+ ret = mt7996_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0);
+ if (ret)
+ return ret;
- ret = mt7996_mcu_set_mwds(dev, 1);
- if (ret)
- return ret;
+ ret = mt7996_mcu_set_mwds(dev, 1);
+ if (ret)
+ return ret;
+ }
ret = mt7996_mcu_init_rx_airtime(dev);
if (ret)
@@ -3248,7 +3273,7 @@ int mt7996_mcu_init(struct mt7996_dev *dev)
void mt7996_mcu_exit(struct mt7996_dev *dev)
{
mt7996_mcu_restart(&dev->mt76);
- if (mt7996_firmware_state(dev, false)) {
+ if (mt7996_firmware_state(dev, FW_STATE_FW_DOWNLOAD)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
goto out;
}
@@ -3535,11 +3560,10 @@ int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
struct cfg80211_chan_def *chandef)
{
struct mt7996_dev *dev = phy->dev;
- int err, region;
+ int err, region, rdd_idx = mt7996_get_rdd_idx(phy, true);
if (!chandef) { /* disable offchain */
- err = mt7996_mcu_rdd_cmd(dev, RDD_STOP, MT_RX_SEL2,
- 0, 0);
+ err = mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
if (err)
return err;
@@ -3565,8 +3589,7 @@ int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
break;
}
- return mt7996_mcu_rdd_cmd(dev, RDD_START, MT_RX_SEL2,
- 0, region);
+ return mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
}
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
@@ -4433,8 +4456,7 @@ int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable)
&req, sizeof(req), true);
}
-int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
- u8 rx_sel, u8 val)
+int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 rdd_idx, u8 val)
{
struct {
u8 _rsv[4];
@@ -4451,8 +4473,7 @@ int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
.tag = cpu_to_le16(UNI_RDD_CTRL_PARM),
.len = cpu_to_le16(sizeof(req) - 4),
.ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
+ .rdd_idx = rdd_idx,
.val = val,
};
@@ -4742,7 +4763,26 @@ int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode)
mode > mt76_connac_lmac_mapping(IEEE80211_AC_VO))
return -EINVAL;
+ if (!mt7996_has_wa(dev)) {
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 cp_mode;
+ u8 rsv[3];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_CMD_SDO_CP_MODE),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .cp_mode = mode,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_UNI_CMD(SDO),
+ &req, sizeof(req), false);
+ }
+
cp_mode = cpu_to_le32(mode);
+
return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(CP_SUPPORT),
&cp_mode, sizeof(cp_mode), true);
}
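
Several mcu.c hunks above share one pattern: on WA-less chips, payloads formerly sent to the WA firmware are re-wrapped as a tagged, length-prefixed TLV for the unified SDO command, with the length field covering everything after the 4 reserved bytes. A standalone sketch of that framing (values illustrative; the kernel additionally byte-swaps with cpu_to_le16/32):

#include <stdint.h>
#include <stdio.h>

struct sdo_req {
	uint8_t rsv[4];
	uint16_t tag;		/* UNI_CMD_SDO_SET / UNI_CMD_SDO_QUERY */
	uint16_t len;		/* everything after rsv[] */
	uint32_t args[3];
} __attribute__((packed));

int main(void)
{
	struct sdo_req req = {
		.tag = 1,			/* UNI_CMD_SDO_SET */
		.len = sizeof(req) - 4,
		.args = { 1, 2, 3 },
	};

	printf("tlv len: %u (struct is %zu bytes)\n",
	       (unsigned)req.len, sizeof(req));
	return 0;
}
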
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 2ab6a53bee86..130ea95626d5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -69,7 +69,7 @@ struct mt7996_mcu_rdd_report {
__le16 tag;
__le16 len;
- u8 band_idx;
+ u8 rdd_idx;
u8 long_detected;
u8 constant_prf_detected;
u8 staggered_prf_detected;
@@ -832,13 +832,13 @@ enum {
sizeof(struct sta_rec_eht_mld) + \
sizeof(struct tlv))
-#define MT7996_MAX_BEACON_SIZE 1338
#define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
sizeof(struct bss_bcn_content_tlv) + \
4 + MT_TXD_SIZE + \
sizeof(struct bss_bcn_cntdwn_tlv) + \
sizeof(struct bss_bcn_mbss_tlv))
-#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
+#define MT7996_MAX_BSS_OFFLOAD_SIZE 2048
+#define MT7996_MAX_BEACON_SIZE (MT7996_MAX_BSS_OFFLOAD_SIZE - \
MT7996_BEACON_UPDATE_SIZE)
enum {
@@ -938,6 +938,12 @@ enum {
};
enum {
+ UNI_CMD_SDO_SET = 1,
+ UNI_CMD_SDO_QUERY,
+ UNI_CMD_SDO_CP_MODE = 6,
+};
+
+enum {
MT7996_SEC_MODE_PLAIN,
MT7996_SEC_MODE_AES,
MT7996_SEC_MODE_SCRAMBLE,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 13b188e281bd..30b40f4a91be 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -54,6 +54,17 @@ static const u32 mt7996_offs[] = {
[MIB_BSCR7] = 0x9e8,
[MIB_BSCR17] = 0xa10,
[MIB_TRDR1] = 0xa28,
+ [HIF_REMAP_L1] = 0x24,
+ [HIF_REMAP_BASE_L1] = 0x130000,
+ [HIF_REMAP_L2] = 0x1b4,
+ [HIF_REMAP_BASE_L2] = 0x1000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_END] = 0x7c3fffff,
+ [WTBLON_WDUCR] = 0x370,
+ [WTBL_UPDATE] = 0x380,
+ [WTBL_ITCR] = 0x3b0,
+ [WTBL_ITCR0] = 0x3b8,
+ [WTBL_ITCR1] = 0x3bc,
};
static const u32 mt7992_offs[] = {
@@ -80,6 +91,54 @@ static const u32 mt7992_offs[] = {
[MIB_BSCR7] = 0xae4,
[MIB_BSCR17] = 0xb0c,
[MIB_TRDR1] = 0xb24,
+ [HIF_REMAP_L1] = 0x8,
+ [HIF_REMAP_BASE_L1] = 0x40000,
+ [HIF_REMAP_L2] = 0x1b4,
+ [HIF_REMAP_BASE_L2] = 0x1000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_END] = 0x7c3fffff,
+ [WTBLON_WDUCR] = 0x370,
+ [WTBL_UPDATE] = 0x380,
+ [WTBL_ITCR] = 0x3b0,
+ [WTBL_ITCR0] = 0x3b8,
+ [WTBL_ITCR1] = 0x3bc,
+};
+
+static const u32 mt7990_offs[] = {
+ [MIB_RVSR0] = 0x800,
+ [MIB_RVSR1] = 0x804,
+ [MIB_BTSCR5] = 0x868,
+ [MIB_BTSCR6] = 0x878,
+ [MIB_RSCR1] = 0x890,
+ [MIB_RSCR27] = 0xa38,
+ [MIB_RSCR28] = 0xa3c,
+ [MIB_RSCR29] = 0xa40,
+ [MIB_RSCR30] = 0xa44,
+ [MIB_RSCR31] = 0xa48,
+ [MIB_RSCR33] = 0xa50,
+ [MIB_RSCR35] = 0xa58,
+ [MIB_RSCR36] = 0xa5c,
+ [MIB_BSCR0] = 0xbb8,
+ [MIB_BSCR1] = 0xbbc,
+ [MIB_BSCR2] = 0xbc0,
+ [MIB_BSCR3] = 0xbc4,
+ [MIB_BSCR4] = 0xbc8,
+ [MIB_BSCR5] = 0xbcc,
+ [MIB_BSCR6] = 0xbd0,
+ [MIB_BSCR7] = 0xbd4,
+ [MIB_BSCR17] = 0xbfc,
+ [MIB_TRDR1] = 0xc14,
+ [HIF_REMAP_L1] = 0x8,
+ [HIF_REMAP_BASE_L1] = 0x40000,
+ [HIF_REMAP_L2] = 0x1b8,
+ [HIF_REMAP_BASE_L2] = 0x110000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_END] = 0x7cffffff,
+ [WTBLON_WDUCR] = 0x400,
+ [WTBL_UPDATE] = 0x410,
+ [WTBL_ITCR] = 0x440,
+ [WTBL_ITCR0] = 0x448,
+ [WTBL_ITCR1] = 0x44c,
};
static const struct __map mt7996_reg_map[] = {
@@ -135,14 +194,83 @@ static const struct __map mt7996_reg_map[] = {
{ 0x0, 0x0, 0x0 }, /* imply end of search */
};
+static const struct __map mt7990_reg_map[] = {
+ {0x54000000, 0x02000, 0x1000}, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ {0x55000000, 0x03000, 0x1000}, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ {0x56000000, 0x04000, 0x1000}, /* WFDMA_2 (Reserved) */
+ {0x57000000, 0x05000, 0x1000}, /* WFDMA_3 (MCU wrap CR) */
+ {0x58000000, 0x06000, 0x1000}, /* WFDMA_4 (PCIE1 MCU DMA0 (MEM_DMA)) */
+ {0x59000000, 0x07000, 0x1000}, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ {0x820c0000, 0x08000, 0x4000}, /* WF_UMAC_TOP (PLE) */
+ {0x820c8000, 0x0c000, 0x2000}, /* WF_UMAC_TOP (PSE) */
+ {0x820cc000, 0x0e000, 0x2000}, /* WF_UMAC_TOP (PP) */
+ {0x820e0000, 0x20000, 0x0400}, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ {0x820e1000, 0x20400, 0x0200}, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ {0x820e2000, 0x20800, 0x0400}, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ {0x820e3000, 0x20c00, 0x0400}, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ {0x820e4000, 0x21000, 0x0400}, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ {0x820e5000, 0x21400, 0x0800}, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ {0x820ce000, 0x21c00, 0x0200}, /* WF_LMAC_TOP (WF_SEC) */
+ {0x820e7000, 0x21e00, 0x0200}, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ {0x820cf000, 0x22000, 0x1000}, /* WF_LMAC_TOP (WF_PF) */
+ {0x820e9000, 0x23400, 0x0200}, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ {0x820ea000, 0x24000, 0x0200}, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ {0x820eb000, 0x24200, 0x0400}, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ {0x820ec000, 0x24600, 0x0200}, /* WF_LMAC_TOP BN0 (WF_INT) */
+ {0x820ed000, 0x24800, 0x0800}, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ {0x820ca000, 0x26000, 0x2000}, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ {0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
+ {0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
+ {0x820f0000, 0xa0000, 0x0400}, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ {0x820f1000, 0xa0600, 0x0200}, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ {0x820f2000, 0xa0800, 0x0400}, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ {0x820f3000, 0xa0c00, 0x0400}, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ {0x820f4000, 0xa1000, 0x0400}, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ {0x820f5000, 0xa1400, 0x0800}, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ {0x820f7000, 0xa1e00, 0x0200}, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ {0x820f9000, 0xa3400, 0x0200}, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ {0x820fa000, 0xa4000, 0x0200}, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ {0x820fb000, 0xa4200, 0x0400}, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ {0x820fc000, 0xa4600, 0x0200}, /* WF_LMAC_TOP BN1 (WF_INT) */
+ {0x820fd000, 0xa4800, 0x0800}, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ {0x820cc000, 0xa5000, 0x2000}, /* WF_LMAC_TOP BN1 (WF_MUCOP) */
+ {0x820c4000, 0xa8000, 0x4000}, /* WF_LMAC_TOP (WF_UWTBL) */
+ {0x81030000, 0xae000, 0x100}, /* WFSYS_AON part 1 */
+ {0x81031000, 0xae100, 0x100}, /* WFSYS_AON part 2 */
+ {0x81032000, 0xae200, 0x100}, /* WFSYS_AON part 3 */
+ {0x81033000, 0xae300, 0x100}, /* WFSYS_AON part 4 */
+ {0x81034000, 0xae400, 0x100}, /* WFSYS_AON part 5 */
+ {0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */
+ {0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */
+ {0x81040000, 0x120000, 0x1000}, /* WF_MCU_CFG_ON */
+ {0x81050000, 0x121000, 0x1000}, /* WF_MCU_EINT */
+ {0x81060000, 0x122000, 0x1000}, /* WF_MCU_GPT */
+ {0x81070000, 0x123000, 0x1000}, /* WF_MCU_WDT */
+ {0x80010000, 0x124000, 0x1000}, /* WF_AXIDMA */
+ {0x7c020000, 0xd0000, 0x10000}, /* CONN_INFRA, wfdma for CODA flow use */
+ {0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top for CODA flow use */
+ {0x20020000, 0xd0000, 0x10000}, /* CONN_INFRA, wfdma */
+ {0x20060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
+ {0x7c000000, 0xf0000, 0x10000}, /* CONN_INFRA */
+ {0x70020000, 0x1f0000, 0x9000}, /* PCIE remapping (AP2CONN) */
+ {0x0, 0x0, 0x0}, /* imply end of search */
+};
+
static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
{
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ u32 l1_mask, val;
- dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
- MT_HIF_REMAP_L1_MASK,
- FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
+ if (is_mt7996(&dev->mt76)) {
+ l1_mask = MT_HIF_REMAP_L1_MASK_7996;
+ val = FIELD_PREP(MT_HIF_REMAP_L1_MASK_7996, base);
+ } else {
+ l1_mask = MT_HIF_REMAP_L1_MASK;
+ val = FIELD_PREP(MT_HIF_REMAP_L1_MASK, base);
+ }
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1, l1_mask, val);
/* use read to push write */
dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
@@ -151,18 +279,41 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
{
- u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
- u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+ u32 offset, base, l2_mask, val;
- dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
- MT_HIF_REMAP_L2_MASK,
- FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
+ if (is_mt7990(&dev->mt76)) {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_7990, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE_7990, addr);
+ l2_mask = MT_HIF_REMAP_L2_MASK_7990;
+ val = FIELD_PREP(MT_HIF_REMAP_L2_MASK_7990, base);
+ } else {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+ l2_mask = MT_HIF_REMAP_L2_MASK;
+ val = FIELD_PREP(MT_HIF_REMAP_L2_MASK, base);
+ }
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2, l2_mask, val);
/* use read to push write */
dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
return MT_HIF_REMAP_BASE_L2 + offset;
}
+static u32 mt7996_reg_map_cbtop(struct mt7996_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_CBTOP_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_CBTOP_BASE, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_CBTOP,
+ MT_HIF_REMAP_CBTOP_MASK,
+ FIELD_PREP(MT_HIF_REMAP_CBTOP_MASK, base));
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_CBTOP);
+
+ return MT_HIF_REMAP_BASE_CBTOP + offset;
+}
+
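/* Worked example (illustrative, from the GENMASK layout in regs.h): for
 * a CBTOP address such as 0x70123456,
 *	base   = FIELD_GET(MT_HIF_REMAP_CBTOP_BASE, addr)   = 0x7012
 *	offset = FIELD_GET(MT_HIF_REMAP_CBTOP_OFFSET, addr) = 0x3456
 * so the base is latched into MT_HIF_REMAP_CBTOP and the access is then
 * issued at MT_HIF_REMAP_BASE_CBTOP + offset = 0x1c0000 + 0x3456.
 */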
static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
{
int i;
@@ -193,17 +344,20 @@ static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr)
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
return mt7996_reg_map_l1(dev, addr);
- if (dev_is_pci(dev->mt76.dev) &&
- ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
- addr >= MT_CBTOP2_PHY_START))
- return mt7996_reg_map_l1(dev, addr);
-
 /* CONN_INFRA: convert to physical addr and use layer 1 remap */
if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) {
addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE;
return mt7996_reg_map_l1(dev, addr);
}
+ if (dev_is_pci(dev->mt76.dev) &&
+ ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+ addr >= MT_CBTOP2_PHY_START)) {
+ if (is_mt7990(&dev->mt76))
+ return mt7996_reg_map_cbtop(dev, addr);
+ return mt7996_reg_map_l1(dev, addr);
+ }
+
return mt7996_reg_map_l2(dev, addr);
}
@@ -323,6 +477,9 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.base = devm_ioremap(dev->mt76.dev,
pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
+ if (!wed->wlan.base)
+ return -ENOMEM;
+
wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
if (hif2) {
@@ -350,7 +507,7 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
MT7996_RXQ_BAND0 * MT_RING_SIZE;
- wed->wlan.id = 0x7991;
+ wed->wlan.id = MT7996_DEVICE_ID_2;
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
} else {
wed->wlan.hw_rro = dev->has_rro; /* default on */
@@ -443,18 +600,24 @@ static int mt7996_mmio_init(struct mt76_dev *mdev,
spin_lock_init(&dev->reg_lock);
switch (device_id) {
- case 0x7990:
+ case MT7996_DEVICE_ID:
dev->reg.base = mt7996_reg_base;
dev->reg.offs_rev = mt7996_offs;
dev->reg.map = mt7996_reg_map;
dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map);
break;
- case 0x7992:
+ case MT7992_DEVICE_ID:
dev->reg.base = mt7996_reg_base;
dev->reg.offs_rev = mt7992_offs;
dev->reg.map = mt7996_reg_map;
dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map);
break;
+ case MT7990_DEVICE_ID:
+ dev->reg.base = mt7996_reg_base;
+ dev->reg.offs_rev = mt7990_offs;
+ dev->reg.map = mt7990_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7990_reg_map);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 43e646ed6094..1ad6bc046f7c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -14,7 +14,7 @@
#define MT7996_MAX_RADIOS 3
#define MT7996_MAX_INTERFACES 19 /* per-band */
#define MT7996_MAX_WMM_SETS 4
-#define MT7996_WTBL_BMC_SIZE (is_mt7992(&dev->mt76) ? 32 : 64)
+#define MT7996_WTBL_BMC_SIZE (is_mt7996(&dev->mt76) ? 64 : 32)
#define MT7996_WTBL_RESERVED (mt7996_wtbl_size(dev) - 1)
#define MT7996_WTBL_STA (MT7996_WTBL_RESERVED - \
mt7996_max_interface_num(dev))
@@ -29,6 +29,16 @@
#define MT7996_RX_RING_SIZE 1536
#define MT7996_RX_MCU_RING_SIZE 512
#define MT7996_RX_MCU_RING_SIZE_WA 1024
+/* scatter-gather of MCU events is not supported in connac3 */
+#define MT7996_RX_MCU_BUF_SIZE (2048 + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define MT7996_DEVICE_ID 0x7990
+#define MT7996_DEVICE_ID_2 0x7991
+#define MT7992_DEVICE_ID 0x7992
+#define MT7992_DEVICE_ID_2 0x799a
+#define MT7990_DEVICE_ID 0x7993
+#define MT7990_DEVICE_ID_2 0x799b
#define MT7996_FIRMWARE_WA "mediatek/mt7996/mt7996_wa.bin"
#define MT7996_FIRMWARE_WM "mediatek/mt7996/mt7996_wm.bin"
@@ -50,6 +60,11 @@
#define MT7992_FIRMWARE_DSP_23 "mediatek/mt7996/mt7992_dsp_23.bin"
#define MT7992_ROM_PATCH_23 "mediatek/mt7996/mt7992_rom_patch_23.bin"
+#define MT7990_FIRMWARE_WA ""
+#define MT7990_FIRMWARE_WM "mediatek/mt7996/mt7990_wm.bin"
+#define MT7990_FIRMWARE_DSP ""
+#define MT7990_ROM_PATCH "mediatek/mt7996/mt7990_rom_patch.bin"
+
#define MT7996_EEPROM_DEFAULT "mediatek/mt7996/mt7996_eeprom.bin"
#define MT7996_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7996_eeprom_2i5i6i.bin"
#define MT7996_EEPROM_DEFAULT_233 "mediatek/mt7996/mt7996_eeprom_233.bin"
@@ -61,6 +76,9 @@
#define MT7992_EEPROM_DEFAULT_23 "mediatek/mt7996/mt7992_eeprom_23.bin"
#define MT7992_EEPROM_DEFAULT_23_INT "mediatek/mt7996/mt7992_eeprom_23_2i5i.bin"
+#define MT7990_EEPROM_DEFAULT "mediatek/mt7996/mt7990_eeprom.bin"
+#define MT7990_EEPROM_DEFAULT_INT "mediatek/mt7996/mt7990_eeprom_2i5i.bin"
+
#define MT7996_EEPROM_SIZE 7680
#define MT7996_EEPROM_BLOCK_SIZE 16
#define MT7996_TOKEN_SIZE 16384
@@ -131,6 +149,10 @@ enum mt7992_var_type {
MT7992_VAR_TYPE_23,
};
+enum mt7990_var_type {
+ MT7990_VAR_TYPE_23,
+};
+
enum mt7996_fem_type {
MT7996_FEM_EXT,
MT7996_FEM_INT,
@@ -165,6 +187,8 @@ enum mt7996_rxq_id {
MT7996_RXQ_TXFREE1 = 9,
MT7996_RXQ_TXFREE2 = 7,
MT7996_RXQ_RRO_IND = 0,
+ MT7990_RXQ_TXFREE0 = 6,
+ MT7990_RXQ_TXFREE1 = 7,
};
struct mt7996_twt_flow {
@@ -281,8 +305,6 @@ struct mt7996_phy {
s16 coverage_class;
u8 slottime;
- u8 rdd_state;
-
u16 beacon_rate;
u32 rx_ampdu_ts;
@@ -293,6 +315,7 @@ struct mt7996_phy {
struct mt76_channel_state state_ts;
u16 orig_chainmask;
+ u16 orig_antenna_mask;
bool has_aux_rx;
bool counter_reset;
@@ -405,10 +428,10 @@ enum {
__MT_WFDMA_MAX,
};
-enum {
- MT_RX_SEL0,
- MT_RX_SEL1,
- MT_RX_SEL2, /* monitor chain */
+enum rdd_idx {
+ MT_RDD_IDX_BAND2, /* RDD idx for band idx 2 */
+ MT_RDD_IDX_BAND1, /* RDD idx for band idx 1 */
+ MT_RDD_IDX_BACKGROUND, /* RDD idx for background chain */
};
enum mt7996_rdd_cmd {
@@ -427,6 +450,21 @@ enum mt7996_rdd_cmd {
RDD_IRQ_OFF,
};
+static inline int
+mt7996_get_rdd_idx(struct mt7996_phy *phy, bool is_background)
+{
+ if (!phy->mt76->cap.has_5ghz)
+ return -1;
+
+ if (is_background)
+ return MT_RDD_IDX_BACKGROUND;
+
+ if (phy->mt76->band_idx == MT_BAND2)
+ return MT_RDD_IDX_BAND2;
+
+ return MT_RDD_IDX_BAND1;
+}
+
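/* Hedged usage sketch (not part of this patch): the helper collapses the
 * old (MT_RX_SEL*, band_idx) pair into a single RDD index, e.g.:
 *
 *	int rdd_idx = mt7996_get_rdd_idx(phy, false);
 *
 *	if (rdd_idx < 0)	// no 5 GHz chain on this phy
 *		return -EINVAL;
 *	return mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
 */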
static inline struct mt7996_dev *
mt7996_hw_dev(struct ieee80211_hw *hw)
{
@@ -461,31 +499,12 @@ mt7996_phy3(struct mt7996_dev *dev)
static inline bool
mt7996_band_valid(struct mt7996_dev *dev, u8 band)
{
- if (is_mt7992(&dev->mt76))
+ if (!is_mt7996(&dev->mt76))
return band <= MT_BAND1;
return band <= MT_BAND2;
}
-static inline bool
-mt7996_has_background_radar(struct mt7996_dev *dev)
-{
- switch (mt76_chip(&dev->mt76)) {
- case 0x7990:
- if (dev->var.type == MT7996_VAR_TYPE_233)
- return false;
- break;
- case 0x7992:
- if (dev->var.type == MT7992_VAR_TYPE_23)
- return false;
- break;
- default:
- return false;
- }
-
- return true;
-}
-
static inline struct mt7996_phy *
mt7996_band_phy(struct mt7996_dev *dev, enum nl80211_band band)
{
@@ -549,6 +568,7 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy);
int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
struct ieee80211_channel *chan);
s8 mt7996_eeprom_get_power_delta(struct mt7996_dev *dev, int band);
+bool mt7996_eeprom_has_background_radar(struct mt7996_dev *dev);
int mt7996_dma_init(struct mt7996_dev *dev);
void mt7996_dma_reset(struct mt7996_dev *dev, bool force);
void mt7996_dma_prefetch(struct mt7996_dev *dev);
@@ -637,8 +657,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy);
int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state);
int mt7996_mcu_set_thermal_protect(struct mt7996_phy *phy, bool enable);
int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy);
-int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
- u8 rx_sel, u8 val);
+int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 rdd_idx, u8 val);
int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
struct cfg80211_chan_def *chandef);
int mt7996_mcu_set_fixed_rate_table(struct mt7996_phy *phy, u8 table_idx,
@@ -704,6 +723,11 @@ static inline u16 mt7996_rx_chainmask(struct mt7996_phy *phy)
return tx_chainmask | (BIT(fls(tx_chainmask)) * phy->has_aux_rx);
}
+static inline bool mt7996_has_wa(struct mt7996_dev *dev)
+{
+ return !is_mt7990(&dev->mt76);
+}
+
void mt7996_mac_init(struct mt7996_dev *dev);
u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw);
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
index 04056181368a..19e99bc1c6c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
@@ -16,14 +16,16 @@ static DEFINE_SPINLOCK(hif_lock);
static u32 hif_idx;
static const struct pci_device_id mt7996_pci_device_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7990) },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7992) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID) },
{ },
};
static const struct pci_device_id mt7996_hif_device_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7991) },
- { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x799a) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID_2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID_2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID_2) },
{ },
};
@@ -63,8 +65,9 @@ static struct mt7996_hif *mt7996_pci_init_hif2(struct pci_dev *pdev)
{
hif_idx++;
- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7991, NULL) &&
- !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x799a, NULL))
+ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7996_DEVICE_ID_2, NULL) &&
+ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7992_DEVICE_ID_2, NULL) &&
+ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, MT7990_DEVICE_ID_2, NULL))
return NULL;
writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
@@ -121,7 +124,9 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
mt76_pci_disable_aspm(pdev);
- if (id->device == 0x7991 || id->device == 0x799a)
+ if (id->device == MT7996_DEVICE_ID_2 ||
+ id->device == MT7992_DEVICE_ID_2 ||
+ id->device == MT7990_DEVICE_ID_2)
return mt7996_pci_hif2_probe(pdev);
dev = mt7996_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
@@ -256,3 +261,5 @@ MODULE_FIRMWARE(MT7992_FIRMWARE_WA);
MODULE_FIRMWARE(MT7992_FIRMWARE_WM);
MODULE_FIRMWARE(MT7992_FIRMWARE_DSP);
MODULE_FIRMWARE(MT7992_ROM_PATCH);
+MODULE_FIRMWARE(MT7990_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7990_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index 1876a968c92d..e942c0058731 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -64,6 +64,17 @@ enum offs_rev {
MIB_BSCR7,
MIB_BSCR17,
MIB_TRDR1,
+ HIF_REMAP_L1,
+ HIF_REMAP_BASE_L1,
+ HIF_REMAP_L2,
+ HIF_REMAP_BASE_L2,
+ CBTOP1_PHY_END,
+ INFRA_MCU_END,
+ WTBLON_WDUCR,
+ WTBL_UPDATE,
+ WTBL_ITCR,
+ WTBL_ITCR0,
+ WTBL_ITCR1,
__MT_OFFS_MAX,
};
@@ -291,19 +302,19 @@ enum offs_rev {
/* WTBLON TOP */
#define MT_WTBLON_TOP_BASE 0x820d4000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x370)
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_WDUCR))
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(4, 0)
-#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x380)
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE))
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(11, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(14)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_ITCR MT_WTBLON_TOP(0x3b0)
+#define MT_WTBL_ITCR MT_WTBLON_TOP(__OFFS(WTBL_ITCR))
#define MT_WTBL_ITCR_WR BIT(16)
#define MT_WTBL_ITCR_EXEC BIT(31)
-#define MT_WTBL_ITDR0 MT_WTBLON_TOP(0x3b8)
-#define MT_WTBL_ITDR1 MT_WTBLON_TOP(0x3bc)
+#define MT_WTBL_ITDR0 MT_WTBLON_TOP(__OFFS(WTBL_ITCR0))
+#define MT_WTBL_ITDR1 MT_WTBLON_TOP(__OFFS(WTBL_ITCR1))
#define MT_WTBL_SPE_IDX_SEL BIT(6)
/* WTBL */
@@ -483,7 +494,7 @@ enum offs_rev {
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
MT_MCUQ_ID(q) * 0x4)
-#define MT_RXQ_BAND1_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
MT_RXQ_ID(q) * 0x4)
#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
MT_TXQ_ID(q) * 0x4)
@@ -504,6 +515,8 @@ enum offs_rev {
#define MT_INT_RX_DONE_WA_TRI BIT(3)
#define MT_INT_RX_TXFREE_MAIN BIT(17)
#define MT_INT_RX_TXFREE_TRI BIT(15)
+#define MT_INT_RX_TXFREE_BAND0_MT7990 BIT(14)
+#define MT_INT_RX_TXFREE_BAND1_MT7990 BIT(15)
#define MT_INT_RX_DONE_BAND2_EXT BIT(23)
#define MT_INT_RX_TXFREE_EXT BIT(26)
#define MT_INT_MCU_CMD BIT(29)
@@ -576,27 +589,39 @@ enum offs_rev {
#define MT_MCU_CMD_WDT_MASK GENMASK(31, 30)
/* l1/l2 remap */
-#define MT_HIF_REMAP_L1 0x155024
-#define MT_HIF_REMAP_L1_MASK GENMASK(31, 16)
+#define CONN_BUS_CR_VON_BASE 0x155000
+#define MT_HIF_REMAP_L1 (CONN_BUS_CR_VON_BASE + __OFFS(HIF_REMAP_L1))
+#define MT_HIF_REMAP_L1_MASK_7996 GENMASK(31, 16)
+#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
-#define MT_HIF_REMAP_BASE_L1 0x130000
+#define MT_HIF_REMAP_BASE_L1 __OFFS(HIF_REMAP_BASE_L1)
-#define MT_HIF_REMAP_L2 0x1b4
+#define MT_HIF_REMAP_L2 __OFFS(HIF_REMAP_L2)
#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
-#define MT_HIF_REMAP_BASE_L2 0x1000
+#define MT_HIF_REMAP_L2_MASK_7990 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_OFFSET_7990 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_BASE_7990 GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L2 __OFFS(HIF_REMAP_BASE_L2)
+
+/* for mt7990 only */
+#define MT_HIF_REMAP_CBTOP 0x1f6554
+#define MT_HIF_REMAP_CBTOP_MASK GENMASK(15, 0)
+#define MT_HIF_REMAP_CBTOP_OFFSET GENMASK(15, 0)
+#define MT_HIF_REMAP_CBTOP_BASE GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_CBTOP 0x1c0000
#define MT_INFRA_BASE 0x18000000
#define MT_WFSYS0_PHY_START 0x18400000
#define MT_WFSYS1_PHY_START 0x18800000
#define MT_WFSYS1_PHY_END 0x18bfffff
#define MT_CBTOP1_PHY_START 0x70000000
-#define MT_CBTOP1_PHY_END 0x77ffffff
+#define MT_CBTOP1_PHY_END __OFFS(CBTOP1_PHY_END)
#define MT_CBTOP2_PHY_START 0xf0000000
#define MT_INFRA_MCU_START 0x7c000000
-#define MT_INFRA_MCU_END 0x7c3fffff
+#define MT_INFRA_MCU_END __OFFS(INFRA_MCU_END)
/* FW MODE SYNC */
#define MT_FW_ASSERT_CNT 0x02208274
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 10d2e2124ff8..c2a1234b59db 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -503,8 +503,10 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer,
(void *)buffer, buffer_len, complete_fn, context);
r = usb_submit_urb(urb, GFP_ATOMIC);
- if (r)
+ if (r) {
+ usb_free_urb(urb);
dev_err(&udev->dev, "Async write submit failed (%d)\n", r);
+ }
return r;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index e5f553a1ea24..b7ea606bda08 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -9393,7 +9393,7 @@ static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx,
p0, p1, pf, idx0, idx1, ibit);
if (bidx != 5 && pf <= p0 && pf < p1) {
- idxf[iorq] = idxf[iorq];
+ /* no need to adjust idxf[] */;
} else if (p0 < p1) {
pf = p0;
idxf[iorq] = idx0 & 0x3F;
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 7537f04b1930..819cf519e66e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -58,17 +58,6 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
}
EXPORT_SYMBOL(rtl_rfreg_delay);
-void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
-{
- if (addr >= 0xf9 && addr <= 0xfe) {
- rtl_addr_delay(addr);
- } else {
- rtl_set_bbreg(hw, addr, MASKDWORD, data);
- udelay(1);
- }
-}
-EXPORT_SYMBOL(rtl_bb_delay);
-
static void rtl_fw_do_work(const struct firmware *firmware, void *context,
bool is_wow)
{
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h
index 42c2d9e13bb8..45225d89ac5e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.h
+++ b/drivers/net/wireless/realtek/rtlwifi/core.h
@@ -58,7 +58,6 @@ void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
void rtl_addr_delay(u32 addr);
void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
u32 mask, u32 data);
-void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
bool rtl_btc_status_false(void);
void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 0eafc4d125f9..898f597f70a9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -155,6 +155,16 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
((u8)init_aspm) == (PCI_EXP_LNKCTL_ASPM_L0S |
PCI_EXP_LNKCTL_ASPM_L1 | PCI_EXP_LNKCTL_CCC))
ppsc->support_aspm = false;
+
+ /* RTL8723BE chips found on some ASUSTek laptops, such as the F441U
+ * and X555UQ with subsystem ID 11ad:1723, are known to emit large
+ * numbers of PCIe AER errors during and after boot, causing heavy
+ * lag, poor network throughput, and occasional lock-ups.
+ */
+ if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8723BE &&
+ (rtlpci->pdev->subsystem_vendor == 0x11ad &&
+ rtlpci->pdev->subsystem_device == 0x1723))
+ ppsc->support_aspm = false;
}
static bool _rtl_pci_platform_switch_device_pci_aspm(
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index e07402e73ba3..68f890050afb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -2055,11 +2055,6 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
}
-void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
-{
- return;
-}
-
static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
u32 cmdtableidx, u32 cmdtablesz, enum swchnlcmd_id cmdid,
u32 para1, u32 para2, u32 msdelay)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
index bbe9ef77225e..a9bfe54f2802 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
@@ -83,7 +83,6 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw);
bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw);
void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw);
-void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
index 289ec71ce3e5..8c2167cc1f13 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
@@ -2445,11 +2445,6 @@ void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
}
-void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
-{
- /* Nothing to do. */
-}
-
u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
index 090a6203db7e..a107a5a76beb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
@@ -24,7 +24,6 @@ void rtl92du_phy_set_poweron(struct ieee80211_hw *hw);
bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw);
void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw);
-void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw);
void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 73ef602bfb01..1e7f0cd1c86e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -2926,10 +2926,6 @@ void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw)
rtlphy->lck_inprogress = false;
}
-void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
-{
-}
-
void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
{
_rtl92ee_phy_set_rfpath_switch(hw, bmain, false);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
index 1a5dbc628379..ec4c26b81c48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
@@ -119,7 +119,6 @@ void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw,
void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw);
u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw);
void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 9eddbada8af1..13a05066e8a6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -4586,10 +4586,6 @@ void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw)
{
}
-void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
-{
-}
-
void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
{
_rtl8821ae_phy_set_rfpath_switch(hw, bmain);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
index 35b7d0f70125..90bf5462a3f8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
@@ -214,7 +214,6 @@ void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw,
bool b_recovery);
void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw,
bool b_recovery);
-void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index f5718e570011..d35ed56d6db9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1077,15 +1077,3 @@ void rtl_usb_disconnect(struct usb_interface *intf)
ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_usb_disconnect);
-
-int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message)
-{
- return 0;
-}
-EXPORT_SYMBOL(rtl_usb_suspend);
-
-int rtl_usb_resume(struct usb_interface *pusb_intf)
-{
- return 0;
-}
-EXPORT_SYMBOL(rtl_usb_resume);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index b66d6f9ae564..b873bbc9c4c2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -138,7 +138,5 @@ int rtl_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id,
const struct rtl_hal_cfg *rtl92cu_hal_cfg);
void rtl_usb_disconnect(struct usb_interface *intf);
-int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
-int rtl_usb_resume(struct usb_interface *pusb_intf);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index c929db1e53ca..64904278ddad 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -309,7 +309,7 @@ static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type)
{
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
- u8 para[2] = {0};
+ u8 para[6] = {};
u8 times;
u16 tbtt_interval = coex_stat->wl_beacon_interval;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 6b563ac489a7..4fc78b882080 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1466,7 +1466,7 @@ void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size)
{
- u8 bckp[2];
+ u8 bckp[3];
u8 val;
u16 rsvd_pg_head;
u32 bcn_valid_addr;
@@ -1478,6 +1478,8 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
if (!size)
return -EINVAL;
+ bckp[2] = rtw_read8(rtwdev, REG_BCN_CTRL);
+
if (rtw_chip_wcpu_11n(rtwdev)) {
rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
} else {
@@ -1491,6 +1493,9 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
val |= BIT_ENSWBCN >> 8;
rtw_write8(rtwdev, REG_CR + 1, val);
+ rtw_write8(rtwdev, REG_BCN_CTRL,
+ (bckp[2] & ~BIT_EN_BCN_FUNCTION) | BIT_DIS_TSF_UDT);
+
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
bckp[1] = val;
@@ -1521,6 +1526,7 @@ restore:
rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
rsvd_pg_head | BIT_BCN_VALID_V1);
+ rtw_write8(rtwdev, REG_BCN_CTRL, bckp[2]);
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 96aeda26014e..d4bee9c3ecfe 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -19,6 +19,8 @@ struct rtw_hci_ops {
void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
void (*interface_cfg)(struct rtw_dev *rtwdev);
void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable);
+ void (*write_firmware_page)(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
@@ -79,6 +81,12 @@ static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable);
}
+static inline void rtw_hci_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size)
+{
+ rtwdev->hci.ops->write_firmware_page(rtwdev, page, data, size);
+}
+
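/* Editorial note: this indirection lets each bus hook its own page
 * writer; the pci.c hunk below simply points the new op at the
 * freshly exported rtw_write_firmware_page(), while other buses (not
 * shown in this hunk) can substitute a bus-specific implementation.
 */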
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 0491f501c138..f66d1b302dc5 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -856,8 +856,8 @@ fwdl_ready:
}
}
-static void
-write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
+void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size)
{
u32 val32;
u32 block_nr;
@@ -887,6 +887,7 @@ write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
}
}
+EXPORT_SYMBOL(rtw_write_firmware_page);
static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
@@ -904,11 +905,13 @@ download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);
for (page = 0; page < total_page; page++) {
- write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
+ rtw_hci_write_firmware_page(rtwdev, page, data,
+ DLFW_PAGE_SIZE_LEGACY);
data += DLFW_PAGE_SIZE_LEGACY;
}
if (last_page_size)
- write_firmware_page(rtwdev, page, data, last_page_size);
+ rtw_hci_write_firmware_page(rtwdev, page, data,
+ last_page_size);
if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
rtw_err(rtwdev, "failed to check download firmware report\n");
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index 6905e2747372..e92b1483728d 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -34,6 +34,8 @@ int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
const struct rtw_pwr_seq_cmd * const *cmd_seq);
int rtw_mac_power_on(struct rtw_dev *rtwdev);
void rtw_mac_power_off(struct rtw_dev *rtwdev);
+void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 026fbf4ad9cc..77f9fbe1870c 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -396,6 +396,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
+ rtw_set_ampdu_factor(rtwdev, vif, conf);
+
rtw_fw_beacon_filter_config(rtwdev, true, vif);
} else {
rtw_leave_lps(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 959f56a3cc1a..c4de5d114eda 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -2242,7 +2242,8 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
- ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ if (rtwdev->chip->amsdu_in_ampdu)
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
@@ -2447,6 +2448,38 @@ void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable)
}
}
+void rtw_set_ampdu_factor(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ const struct rtw_chip_ops *ops = rtwdev->chip->ops;
+ struct ieee80211_sta *sta;
+ u8 factor = 0xff;
+
+ if (!ops->set_ampdu_factor)
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ rtw_warn(rtwdev, "%s: failed to find station %pM\n",
+ __func__, bss_conf->bssid);
+ return;
+ }
+
+ if (sta->deflink.vht_cap.vht_supported)
+ factor = u32_get_bits(sta->deflink.vht_cap.cap,
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+ else if (sta->deflink.ht_cap.ht_supported)
+ factor = sta->deflink.ht_cap.ampdu_factor;
+
+ rcu_read_unlock();
+
+ if (factor != 0xff)
+ ops->set_ampdu_factor(rtwdev, factor);
+}
+
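/* Editorial note: ieee80211_find_sta() is only valid under RCU, hence
 * the explicit rcu_read_lock()/rcu_read_unlock() pair above; 0xff acts
 * as a sentinel for "no HT/VHT capability parsed, keep the chip's
 * default A-MPDU factor".
 */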
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless core module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 02343e059fd9..b0f1fabe9554 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -878,6 +878,7 @@ struct rtw_chip_ops {
u32 antenna_rx);
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
void (*efuse_grant)(struct rtw_dev *rtwdev, bool enable);
+ void (*set_ampdu_factor)(struct rtw_dev *rtwdev, u8 factor);
void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
void (*phy_calibration)(struct rtw_dev *rtwdev);
void (*dpk_track)(struct rtw_dev *rtwdev);
@@ -1229,6 +1230,7 @@ struct rtw_chip_info {
u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
const struct rtw_fwcd_segs *fwcd_segs;
+ bool amsdu_in_ampdu;
u8 usb_tx_agg_desc_num;
bool hw_feature_report;
u8 c2h_ra_report_size;
@@ -2272,4 +2274,6 @@ void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
void rtw_core_port_switch(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
bool rtw_core_check_sta_active(struct rtw_dev *rtwdev);
void rtw_core_enable_beacon(struct rtw_dev *rtwdev, bool enable);
+void rtw_set_ampdu_factor(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index bb4c4ccb31d4..7f2b6dc21f56 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -12,6 +12,7 @@
#include "fw.h"
#include "ps.h"
#include "debug.h"
+#include "mac.h"
static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
@@ -1602,6 +1603,7 @@ static const struct rtw_hci_ops rtw_pci_ops = {
.link_ps = rtw_pci_link_ps,
.interface_cfg = rtw_pci_interface_cfg,
.dynamic_rx_agg = NULL,
+ .write_firmware_page = rtw_write_firmware_page,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
index 1d232adbdd7e..9e6700c43a63 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -519,15 +519,6 @@ static const struct rtw_rqpn rqpn_table_8703b[] = {
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
};
-/* Default power index table for RTL8703B, used if EFUSE does not
- * contain valid data. Replaces EFUSE data from offset 0x10 (start of
- * txpwr_idx_table).
- */
-static const u8 rtw8703b_txpwr_idx_table[] = {
- 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D,
- 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02
-};
-
static void try_mac_from_devicetree(struct rtw_dev *rtwdev)
{
struct device_node *node = rtwdev->dev->of_node;
@@ -544,15 +535,9 @@ static void try_mac_from_devicetree(struct rtw_dev *rtwdev)
}
}
-#define DBG_EFUSE_FIX(rtwdev, name) \
- rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \
- # name "=0x%x\n", rtwdev->efuse.name)
-
static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
- u8 *pwr = (u8 *)efuse->txpwr_idx_table;
- bool valid = false;
int ret;
ret = rtw8723x_read_efuse(rtwdev, log_map);
@@ -562,51 +547,6 @@ static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
if (!is_valid_ether_addr(efuse->addr))
try_mac_from_devicetree(rtwdev);
- /* If TX power index table in EFUSE is invalid, fall back to
- * built-in table.
- */
- for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++)
- if (pwr[i] != 0xff) {
- valid = true;
- break;
- }
- if (!valid) {
- for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++)
- pwr[i] = rtw8703b_txpwr_idx_table[i];
- rtw_dbg(rtwdev, RTW_DBG_EFUSE,
- "Replaced invalid EFUSE TX power index table.");
- rtw8723x_debug_txpwr_limit(rtwdev,
- efuse->txpwr_idx_table, 2);
- }
-
- /* Override invalid antenna settings. */
- if (efuse->bt_setting == 0xff) {
- /* shared antenna */
- efuse->bt_setting |= BIT(0);
- /* RF path A */
- efuse->bt_setting &= ~BIT(6);
- DBG_EFUSE_FIX(rtwdev, bt_setting);
- }
-
- /* Override invalid board options: The coex code incorrectly
- * assumes that if bits 6 & 7 are set the board doesn't
- * support coex. Regd is also derived from rf_board_option and
- * should be 0 if there's no valid data.
- */
- if (efuse->rf_board_option == 0xff) {
- efuse->regd = 0;
- efuse->rf_board_option &= GENMASK(5, 0);
- DBG_EFUSE_FIX(rtwdev, rf_board_option);
- }
-
- /* Override invalid crystal cap setting, default comes from
- * vendor driver. Chip specific.
- */
- if (efuse->crystal_cap == 0xff) {
- efuse->crystal_cap = 0x20;
- DBG_EFUSE_FIX(rtwdev, crystal_cap);
- }
-
return 0;
}
@@ -1904,6 +1844,7 @@ static const struct rtw_chip_ops rtw8703b_ops = {
.set_antenna = NULL,
.cfg_ldo25 = rtw8723x_cfg_ldo25,
.efuse_grant = rtw8723x_efuse_grant,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8723x_false_alarm_statistics,
.phy_calibration = rtw8703b_phy_calibration,
.dpk_track = NULL,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
index 8d38d36be8c0..1f98d35a8dd1 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
@@ -19,7 +19,7 @@ static const struct sdio_device_id rtw_8723cs_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8723cs_id_table);
static struct sdio_driver rtw_8723cs_driver = {
- .name = "rtw8723cs",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8723cs_id_table,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 87715bd54860..31876e708f9e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -1404,6 +1404,7 @@ static const struct rtw_chip_ops rtw8723d_ops = {
.set_antenna = NULL,
.cfg_ldo25 = rtw8723x_cfg_ldo25,
.efuse_grant = rtw8723x_efuse_grant,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8723x_false_alarm_statistics,
.phy_calibration = rtw8723d_phy_calibration,
.cck_pd_set = rtw8723d_phy_cck_pd_set,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
index abbaafa32851..87c8bc9d18a9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723de.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
@@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8723de_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8723de_id_table);
static struct pci_driver rtw_8723de_driver = {
- .name = "rtw_8723de",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8723de_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723ds.c b/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
index e5b6960ba0a0..206b77e5b98e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723ds.c
@@ -25,7 +25,7 @@ static const struct sdio_device_id rtw_8723ds_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8723ds_id_table);
static struct sdio_driver rtw_8723ds_driver = {
- .name = "rtw_8723ds",
+ .name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
.id_table = rtw_8723ds_id_table,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723du.c b/drivers/net/wireless/realtek/rtw88/rtw8723du.c
index 322a805da76b..b661a26e0e22 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723du.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723du.c
@@ -24,7 +24,7 @@ static int rtw8723du_probe(struct usb_interface *intf,
}
static struct usb_driver rtw_8723du_driver = {
- .name = "rtw_8723du",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8723du_id_table,
.probe = rtw8723du_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.c b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
index 69f73cb5b4cd..4c77963fdd37 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723x.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
@@ -69,6 +69,9 @@ static void __rtw8723x_lck(struct rtw_dev *rtwdev)
#define DBG_EFUSE_2BYTE(rtwdev, map, name) \
rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x%02x\n", \
(map)->name[0], (map)->name[1])
+#define DBG_EFUSE_FIX(rtwdev, name) \
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \
+ # name "=0x%x\n", rtwdev->efuse.name)
static void rtw8723xe_efuse_debug(struct rtw_dev *rtwdev,
struct rtw8723x_efuse *map)
@@ -238,10 +241,21 @@ static void rtw8723xs_efuse_parsing(struct rtw_efuse *efuse,
ether_addr_copy(efuse->addr, map->s.mac_addr);
}
+/* Default power index table for RTL8703B/RTL8723D, used if EFUSE does
+ * not contain valid data. Replaces EFUSE data from offset 0x10 (start
+ * of txpwr_idx_table).
+ */
+static const u8 rtw8723x_txpwr_idx_table[] = {
+ 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D,
+ 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02
+};
+
static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 *pwr = (u8 *)efuse->txpwr_idx_table;
struct rtw8723x_efuse *map;
+ bool valid = false;
int i;
map = (struct rtw8723x_efuse *)log_map;
@@ -279,6 +293,51 @@ static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
return -EOPNOTSUPP;
}
+ /* If TX power index table in EFUSE is invalid, fall back to
+ * built-in table.
+ */
+ for (i = 0; i < ARRAY_SIZE(rtw8723x_txpwr_idx_table); i++)
+ if (pwr[i] != 0xff) {
+ valid = true;
+ break;
+ }
+ if (!valid) {
+ for (i = 0; i < ARRAY_SIZE(rtw8723x_txpwr_idx_table); i++)
+ pwr[i] = rtw8723x_txpwr_idx_table[i];
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "Replaced invalid EFUSE TX power index table.");
+ rtw8723x_debug_txpwr_limit(rtwdev,
+ efuse->txpwr_idx_table, 2);
+ }
+
+ /* Override invalid antenna settings. */
+ if (efuse->bt_setting == 0xff) {
+ /* shared antenna */
+ efuse->bt_setting |= BIT(0);
+ /* RF path A */
+ efuse->bt_setting &= ~BIT(6);
+ DBG_EFUSE_FIX(rtwdev, bt_setting);
+ }
+
+ /* Override invalid board options: The coex code incorrectly
+ * assumes that if bits 6 & 7 are set the board doesn't
+ * support coex. Regd is also derived from rf_board_option and
+ * should be 0 if there's no valid data.
+ */
+ if (efuse->rf_board_option == 0xff) {
+ efuse->regd = 0;
+ efuse->rf_board_option &= GENMASK(5, 0);
+ DBG_EFUSE_FIX(rtwdev, rf_board_option);
+ }
+
+ /* Override invalid crystal cap setting, default comes from
+ * vendor driver. Chip specific.
+ */
+ if (efuse->crystal_cap == 0xff) {
+ efuse->crystal_cap = 0x20;
+ DBG_EFUSE_FIX(rtwdev, crystal_cap);
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.c b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
index f9ba2aa2928a..c2ef41767ff9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
@@ -925,6 +925,7 @@ static const struct rtw_chip_ops rtw8812a_ops = {
.set_tx_power_index = rtw88xxa_set_tx_power_index,
.cfg_ldo25 = rtw8812a_cfg_ldo25,
.efuse_grant = rtw88xxa_efuse_grant,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw88xxa_false_alarm_statistics,
.phy_calibration = rtw8812a_phy_calibration,
.cck_pd_set = rtw88xxa_phy_cck_pd_set,
@@ -1075,6 +1076,7 @@ const struct rtw_chip_info rtw8812a_hw_spec = {
.rfe_defs = rtw8812a_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8812a_rfe_defs),
.rx_ldpc = false,
+ .amsdu_in_ampdu = true,
.hw_feature_report = false,
.c2h_ra_report_size = 4,
.old_datarate_fb_limit = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812au.c b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
index e18995f4cc78..dfea89670372 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
@@ -82,7 +82,7 @@ static const struct usb_device_id rtw_8812au_id_table[] = {
MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table);
static struct usb_driver rtw_8812au_driver = {
- .name = "rtw_8812au",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8812au_id_table,
.probe = rtw_usb_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.c b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
index cfd35d40d46e..44dd3090484b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8814a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
@@ -1332,6 +1332,16 @@ static void rtw8814a_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
{
}
+/* Without this the RTL8814A sends too many frames and some 11n APs
+ * can't handle them, resulting in low TX speed. Other chips seem fine.
+ */
+static void rtw8814a_set_ampdu_factor(struct rtw_dev *rtwdev, u8 factor)
+{
+ factor = min_t(u8, factor, IEEE80211_VHT_MAX_AMPDU_256K);
+
+ rtw_write32(rtwdev, REG_AMPDU_MAX_LENGTH, (8192 << factor) - 1);
+}
+
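/* Worked example (illustrative): a VHT exponent n encodes a maximum
 * A-MPDU length of 2^(13 + n) - 1 bytes, i.e. (8192 << n) - 1.  With
 * the clamp above, n = IEEE80211_VHT_MAX_AMPDU_256K (5) programs
 * REG_AMPDU_MAX_LENGTH with 262143 (0x3ffff).
 */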
static void rtw8814a_false_alarm_statistics(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
@@ -2051,6 +2061,7 @@ static const struct rtw_chip_ops rtw8814a_ops = {
.set_antenna = NULL,
.cfg_ldo25 = rtw8814a_cfg_ldo25,
.efuse_grant = rtw8814a_efuse_grant,
+ .set_ampdu_factor = rtw8814a_set_ampdu_factor,
.false_alarm_statistics = rtw8814a_false_alarm_statistics,
.phy_calibration = rtw8814a_phy_calibration,
.cck_pd_set = rtw8814a_phy_cck_pd_set,
@@ -2189,6 +2200,7 @@ const struct rtw_chip_info rtw8814a_hw_spec = {
.rx_ldpc = true,
.max_power_index = 0x3f,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .amsdu_in_ampdu = false, /* RX speed is better without AMSDU */
.usb_tx_agg_desc_num = 3,
.hw_feature_report = false,
.c2h_ra_report_size = 6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
index 54d2e20a7764..c7436f1c8d40 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
@@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8814ae_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8814ae_id_table);
static struct pci_driver rtw_8814ae_driver = {
- .name = "rtw_8814ae",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8814ae_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814au.c b/drivers/net/wireless/realtek/rtw88/rtw8814au.c
index afe045fb84de..1a0886ec17dd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8814au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814au.c
@@ -42,7 +42,7 @@ static const struct usb_device_id rtw_8814au_id_table[] = {
MODULE_DEVICE_TABLE(usb, rtw_8814au_id_table);
static struct usb_driver rtw_8814au_driver = {
- .name = "rtw_8814au",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8814au_id_table,
.probe = rtw_usb_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.c b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
index f68239b07319..413aec694c33 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
@@ -871,6 +871,7 @@ static const struct rtw_chip_ops rtw8821a_ops = {
.set_tx_power_index = rtw88xxa_set_tx_power_index,
.cfg_ldo25 = rtw8821a_cfg_ldo25,
.efuse_grant = rtw88xxa_efuse_grant,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw88xxa_false_alarm_statistics,
.phy_calibration = rtw8821a_phy_calibration,
.cck_pd_set = rtw88xxa_phy_cck_pd_set,
@@ -1175,6 +1176,7 @@ const struct rtw_chip_info rtw8821a_hw_spec = {
.rfe_defs = rtw8821a_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8821a_rfe_defs),
.rx_ldpc = false,
+ .amsdu_in_ampdu = true,
.hw_feature_report = false,
.c2h_ra_report_size = 4,
.old_datarate_fb_limit = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821au.c b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
index a01744b64e8d..28c281b32978 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
@@ -66,7 +66,7 @@ static const struct usb_device_id rtw_8821au_id_table[] = {
MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table);
static struct usb_driver rtw_8821au_driver = {
- .name = "rtw_8821au",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8821au_id_table,
.probe = rtw_usb_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 0ade7f11cbd2..413130a30ca9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1668,6 +1668,7 @@ static const struct rtw_chip_ops rtw8821c_ops = {
.set_antenna = NULL,
.set_tx_power_index = rtw8821c_set_tx_power_index,
.cfg_ldo25 = rtw8821c_cfg_ldo25,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8821c_false_alarm_statistics,
.phy_calibration = rtw8821c_phy_calibration,
.cck_pd_set = rtw8821c_phy_cck_pd_set,
@@ -1990,6 +1991,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
+ .amsdu_in_ampdu = true,
.usb_tx_agg_desc_num = 3,
.hw_feature_report = true,
.c2h_ra_report_size = 7,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
index f3d971feda04..40637c079d99 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
@@ -21,7 +21,7 @@ static const struct pci_device_id rtw_8821ce_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8821ce_id_table);
static struct pci_driver rtw_8821ce_driver = {
- .name = "rtw_8821ce",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8821ce_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cs.c b/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
index a359413369a4..6d94162213c6 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cs.c
@@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8821cs_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8821cs_id_table);
static struct sdio_driver rtw_8821cs_driver = {
- .name = "rtw_8821cs",
+ .name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
.id_table = rtw_8821cs_id_table,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index a019f4085e73..7a0fffc359e2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -48,7 +48,7 @@ static int rtw_8821cu_probe(struct usb_interface *intf,
}
static struct usb_driver rtw_8821cu_driver = {
- .name = "rtw_8821cu",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8821cu_id_table,
.probe = rtw_8821cu_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index b4934da88e33..ab199eaea3c7 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2158,6 +2158,7 @@ static const struct rtw_chip_ops rtw8822b_ops = {
.set_tx_power_index = rtw8822b_set_tx_power_index,
.set_antenna = rtw8822b_set_antenna,
.cfg_ldo25 = rtw8822b_cfg_ldo25,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8822b_false_alarm_statistics,
.phy_calibration = rtw8822b_phy_calibration,
.pwr_track = rtw8822b_pwr_track,
@@ -2531,6 +2532,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
+ .amsdu_in_ampdu = true,
.usb_tx_agg_desc_num = 3,
.hw_feature_report = true,
.c2h_ra_report_size = 7,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
index 4994950776cd..0bb9f70e7920 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822be.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
@@ -17,7 +17,7 @@ static const struct pci_device_id rtw_8822be_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8822be_id_table);
static struct pci_driver rtw_8822be_driver = {
- .name = "rtw_8822be",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8822be_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bs.c b/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
index 31d8645f83bd..744781dcb419 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bs.c
@@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8822bs_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8822bs_id_table);
static struct sdio_driver rtw_8822bs_driver = {
- .name = "rtw_8822bs",
+ .name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
.id_table = rtw_8822bs_id_table,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
index 572d1f31832e..44e28e583964 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
@@ -77,6 +77,8 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30N */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3322, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* D-Link DWA-T185 rev. A1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x03d1, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* BUFFALO WI-U2-866DM */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
@@ -88,7 +90,7 @@ static int rtw8822bu_probe(struct usb_interface *intf,
}
static struct usb_driver rtw_8822bu_driver = {
- .name = "rtw_8822bu",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8822bu_id_table,
.probe = rtw8822bu_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 5e53e0db177e..017d959de3ce 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -3951,7 +3951,8 @@ static void rtw8822c_dpk_cal_coef1(struct rtw_dev *rtwdev)
rtw_write32(rtwdev, REG_NCTL0, 0x00001148);
rtw_write32(rtwdev, REG_NCTL0, 0x00001149);
- check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55);
+ if (!check_hw_ready(rtwdev, 0x2d9c, MASKBYTE0, 0x55))
+ rtw_warn(rtwdev, "DPK stuck, performance may be suboptimal\n");
rtw_write8(rtwdev, 0x1b10, 0x0);
rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 0x0000000c);
@@ -4968,6 +4969,7 @@ static const struct rtw_chip_ops rtw8822c_ops = {
.set_tx_power_index = rtw8822c_set_tx_power_index,
.set_antenna = rtw8822c_set_antenna,
.cfg_ldo25 = rtw8822c_cfg_ldo25,
+ .set_ampdu_factor = NULL,
.false_alarm_statistics = rtw8822c_false_alarm_statistics,
.dpk_track = rtw8822c_dpk_track,
.phy_calibration = rtw8822c_phy_calibration,
@@ -5349,6 +5351,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
+ .amsdu_in_ampdu = true,
.usb_tx_agg_desc_num = 3,
.hw_feature_report = true,
.c2h_ra_report_size = 7,
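
The DPK hunk above stops discarding check_hw_ready()'s return value: when the calibration status byte at 0x2d9c never reaches 0x55, the driver now warns instead of silently continuing. A generic poll-then-warn sketch of the same pattern (register helper, interval and retry count are illustrative, not rtw88's):

#include <linux/delay.h>
#include <linux/types.h>

/* Poll until (read32(addr) & mask) == target, or give up after @tries. */
static bool example_hw_ready(u32 (*read32)(u32 addr), u32 addr, u32 mask,
			     u32 target, unsigned int tries)
{
	while (tries--) {
		if ((read32(addr) & mask) == target)
			return true;
		usleep_range(100, 200);	/* illustrative poll interval */
	}
	return false;
}

A failed poll is worth at least a warning: a stuck DPK degrades TX performance but is not fatal, which is why the hunk logs and carries on rather than aborting.
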
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
index e26c6bc82936..9def732480af 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
@@ -21,7 +21,7 @@ static const struct pci_device_id rtw_8822ce_id_table[] = {
MODULE_DEVICE_TABLE(pci, rtw_8822ce_id_table);
static struct pci_driver rtw_8822ce_driver = {
- .name = "rtw_8822ce",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8822ce_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cs.c b/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
index 975e81c824f2..322281e07eb8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822cs.c
@@ -20,7 +20,7 @@ static const struct sdio_device_id rtw_8822cs_id_table[] = {
MODULE_DEVICE_TABLE(sdio, rtw_8822cs_id_table);
static struct sdio_driver rtw_8822cs_driver = {
- .name = "rtw_8822cs",
+ .name = KBUILD_MODNAME,
.probe = rtw_sdio_probe,
.remove = rtw_sdio_remove,
.id_table = rtw_8822cs_id_table,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c
index 157d5102a4b1..324fd5c8bfd4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822cu.c
@@ -32,7 +32,7 @@ static int rtw8822cu_probe(struct usb_interface *intf,
}
static struct usb_driver rtw_8822cu_driver = {
- .name = "rtw_8822cu",
+ .name = KBUILD_MODNAME,
.id_table = rtw_8822cu_id_table,
.probe = rtw8822cu_probe,
.disconnect = rtw_usb_disconnect,
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 6209a49312f1..e733ed846123 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -10,6 +10,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include "main.h"
+#include "mac.h"
#include "debug.h"
#include "fw.h"
#include "ps.h"
@@ -677,12 +678,22 @@ static void rtw_sdio_enable_rx_aggregation(struct rtw_dev *rtwdev)
{
u8 size, timeout;
- if (rtw_chip_wcpu_11n(rtwdev)) {
+ switch (rtwdev->chip->id) {
+ case RTW_CHIP_TYPE_8703B:
+ case RTW_CHIP_TYPE_8821A:
+ case RTW_CHIP_TYPE_8812A:
size = 0x6;
timeout = 0x6;
- } else {
+ break;
+ case RTW_CHIP_TYPE_8723D:
+ size = 0xa;
+ timeout = 0x3;
+ rtw_write8_set(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));
+ break;
+ default:
size = 0xff;
timeout = 0x1;
+ break;
}
/* Make the firmware honor the size limit configured below */
@@ -718,10 +729,7 @@ static u8 rtw_sdio_get_tx_qsel(struct rtw_dev *rtwdev, struct sk_buff *skb,
case RTW_TX_QUEUE_H2C:
return TX_DESC_QSEL_H2C;
case RTW_TX_QUEUE_MGMT:
- if (rtw_chip_wcpu_11n(rtwdev))
- return TX_DESC_QSEL_HIGH;
- else
- return TX_DESC_QSEL_MGMT;
+ return TX_DESC_QSEL_MGMT;
case RTW_TX_QUEUE_HI0:
return TX_DESC_QSEL_HIGH;
default:
@@ -1157,6 +1165,7 @@ static const struct rtw_hci_ops rtw_sdio_ops = {
.link_ps = rtw_sdio_link_ps,
.interface_cfg = rtw_sdio_interface_cfg,
.dynamic_rx_agg = NULL,
+ .write_firmware_page = rtw_write_firmware_page,
.read8 = rtw_sdio_read8,
.read16 = rtw_sdio_read16,
@@ -1227,10 +1236,7 @@ static void rtw_sdio_process_tx_queue(struct rtw_dev *rtwdev,
return;
}
- if (queue <= RTW_TX_QUEUE_VO)
- rtw_sdio_indicate_tx_status(rtwdev, skb);
- else
- dev_kfree_skb_any(skb);
+ rtw_sdio_indicate_tx_status(rtwdev, skb);
}
static void rtw_sdio_tx_handler(struct work_struct *work)
@@ -1298,7 +1304,6 @@ static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
int i;
- flush_workqueue(rtwsdio->txwq);
destroy_workqueue(rtwsdio->txwq);
kfree(rtwsdio->tx_handler_data);
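
The flush_workqueue() removal here (and the matching ones in usb.c below) relies on destroy_workqueue() draining all pending and in-flight work itself before tearing the queue down, so the explicit flush was redundant. A minimal teardown sketch:

#include <linux/workqueue.h>

static void example_deinit(struct workqueue_struct *wq)
{
	/*
	 * destroy_workqueue() flushes the queue internally; calling
	 * flush_workqueue() immediately before it adds nothing.
	 */
	destroy_workqueue(wq);
}
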
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index c8092fa0d9f1..3b5126ffc81a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -139,7 +139,7 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
- addr, 0, data, len, 30000);
+ addr, 0, data, len, 500);
if (ret < 0 && ret != -ENODEV && count++ < 4)
rtw_err(rtwdev, "write register 0x%x failed with %d\n",
addr, ret);
@@ -165,6 +165,60 @@ static void rtw_usb_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
rtw_usb_write(rtwdev, addr, val, 4);
}
+static void rtw_usb_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
+ const u8 *data, u32 size)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct usb_device *udev = rtwusb->udev;
+ u32 addr = FW_START_ADDR_LEGACY;
+ u8 *data_dup, *buf;
+ u32 n, block_size;
+ int ret;
+
+ switch (rtwdev->chip->id) {
+ case RTW_CHIP_TYPE_8723D:
+ block_size = 254;
+ break;
+ default:
+ block_size = 196;
+ break;
+ }
+
+ data_dup = kmemdup(data, size, GFP_KERNEL);
+ if (!data_dup)
+ return;
+
+ buf = data_dup;
+
+ rtw_write32_mask(rtwdev, REG_MCUFW_CTRL, BIT_ROM_PGE, page);
+
+ while (size > 0) {
+ if (size >= block_size)
+ n = block_size;
+ else if (size >= 8)
+ n = 8;
+ else
+ n = 1;
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
+ addr, 0, buf, n, 500);
+ if (ret != n) {
+ if (ret != -ENODEV)
+ rtw_err(rtwdev,
+ "write 0x%x len %d failed: %d\n",
+ addr, n, ret);
+ break;
+ }
+
+ addr += n;
+ buf += n;
+ size -= n;
+ }
+
+ kfree(data_dup);
+}
+
static int dma_mapping_to_ep(enum rtw_dma_mapping dma_mapping)
{
switch (dma_mapping) {
@@ -866,6 +920,7 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
case RTW_CHIP_TYPE_8822C:
case RTW_CHIP_TYPE_8822B:
case RTW_CHIP_TYPE_8821C:
+ case RTW_CHIP_TYPE_8814A:
rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
break;
case RTW_CHIP_TYPE_8821A:
@@ -891,6 +946,7 @@ static const struct rtw_hci_ops rtw_usb_ops = {
.link_ps = rtw_usb_link_ps,
.interface_cfg = rtw_usb_interface_cfg,
.dynamic_rx_agg = rtw_usb_dynamic_rx_agg,
+ .write_firmware_page = rtw_usb_write_firmware_page,
.write8 = rtw_usb_write8,
.write16 = rtw_usb_write16,
@@ -948,7 +1004,6 @@ static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
skb_queue_purge(&rtwusb->rx_queue);
- flush_workqueue(rtwusb->rxwq);
destroy_workqueue(rtwusb->rxwq);
skb_queue_purge(&rtwusb->rx_free_queue);
@@ -977,7 +1032,6 @@ static void rtw_usb_deinit_tx(struct rtw_dev *rtwdev)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
- flush_workqueue(rtwusb->txwq);
destroy_workqueue(rtwusb->txwq);
rtw_usb_tx_queue_purge(rtwusb);
}
@@ -1094,7 +1148,8 @@ static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
static bool rtw_usb3_chip_old(u8 chip_id)
{
- return chip_id == RTW_CHIP_TYPE_8812A;
+ return chip_id == RTW_CHIP_TYPE_8812A ||
+ chip_id == RTW_CHIP_TYPE_8814A;
}
static bool rtw_usb3_chip_new(u8 chip_id)
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index f5dedb12c129..581d6d4154d3 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -12,6 +12,121 @@ static const guid_t rtw89_guid = GUID_INIT(0xD2A8C3E8, 0x4B69, 0x4F00,
0x82, 0xBD, 0xFE, 0x86,
0x07, 0x80, 0x3A, 0xA7);
+static u32 rtw89_acpi_traversal_object(struct rtw89_dev *rtwdev,
+ const union acpi_object *obj, u8 *pos)
+{
+ const union acpi_object *elm;
+ unsigned int i;
+ u32 sub_len;
+ u32 len = 0;
+ u8 *tmp;
+
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ if (pos)
+ pos[len] = obj->integer.value;
+
+ len++;
+ break;
+ case ACPI_TYPE_BUFFER:
+ if (unlikely(obj->buffer.length == 0)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "%s: invalid buffer type\n", __func__);
+ goto err;
+ }
+
+ if (pos)
+ memcpy(pos, obj->buffer.pointer, obj->buffer.length);
+
+ len += obj->buffer.length;
+ break;
+ case ACPI_TYPE_PACKAGE:
+ if (unlikely(obj->package.count == 0)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "%s: invalid package type\n", __func__);
+ goto err;
+ }
+
+ for (i = 0; i < obj->package.count; i++) {
+ elm = &obj->package.elements[i];
+ tmp = pos ? pos + len : NULL;
+
+ sub_len = rtw89_acpi_traversal_object(rtwdev, elm, tmp);
+ if (unlikely(sub_len == 0))
+ goto err;
+
+ len += sub_len;
+ }
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: unhandled type: %d\n",
+ __func__, obj->type);
+ goto err;
+ }
+
+ return len;
+
+err:
+ return 0;
+}
+
+static u32 rtw89_acpi_calculate_object_length(struct rtw89_dev *rtwdev,
+ const union acpi_object *obj)
+{
+ return rtw89_acpi_traversal_object(rtwdev, obj, NULL);
+}
+
+static struct rtw89_acpi_data *
+rtw89_acpi_evaluate_method(struct rtw89_dev *rtwdev, const char *method)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct rtw89_acpi_data *data = NULL;
+ acpi_handle root, handle;
+ union acpi_object *obj;
+ acpi_status status;
+ u32 len;
+
+ root = ACPI_HANDLE(rtwdev->dev);
+ if (!root) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi (%s): failed to get root\n", method);
+ return NULL;
+ }
+
+ status = acpi_get_handle(root, (acpi_string)method, &handle);
+ if (ACPI_FAILURE(status)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi (%s): failed to get handle\n", method);
+ return NULL;
+ }
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi (%s): failed to evaluate object\n", method);
+ return NULL;
+ }
+
+ obj = buf.pointer;
+ len = rtw89_acpi_calculate_object_length(rtwdev, obj);
+ if (unlikely(len == 0)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi (%s): failed to traversal obj len\n", method);
+ goto out;
+ }
+
+ data = kzalloc(struct_size(data, buf, len), GFP_KERNEL);
+ if (!data)
+ goto out;
+
+ data->len = len;
+ rtw89_acpi_traversal_object(rtwdev, obj, data->buf);
+
+out:
+ ACPI_FREE(obj);
+ return data;
+}
+
static
int rtw89_acpi_dsm_get_value(struct rtw89_dev *rtwdev, union acpi_object *obj,
u8 *value)
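
rtw89_acpi_traversal_object() above runs twice per evaluated method: once with a NULL destination purely to measure the flattened size (via rtw89_acpi_calculate_object_length()), and once more to fill the buffer allocated from that size. This measure-then-fill idiom keeps the recursion identical in both passes and avoids guessing a maximum buffer. A condensed standalone sketch of the idiom on a toy tree (plain C, not the rtw89 types):

#include <stdlib.h>
#include <string.h>

struct node {
	const unsigned char *data;	/* leaf payload, or NULL for a package */
	size_t len;
	struct node *const *child;
	size_t nchild;
};

/* With dst == NULL this only measures; with dst != NULL it also copies. */
static size_t flatten(const struct node *n, unsigned char *dst)
{
	size_t total = 0, i;

	if (n->data) {
		if (dst)
			memcpy(dst, n->data, n->len);
		return n->len;
	}

	for (i = 0; i < n->nchild; i++)
		total += flatten(n->child[i], dst ? dst + total : NULL);

	return total;
}

static unsigned char *flatten_alloc(const struct node *root, size_t *out_len)
{
	size_t len = flatten(root, NULL);	/* pass 1: measure */
	unsigned char *buf = malloc(len);

	if (buf)
		flatten(root, buf);		/* pass 2: fill */

	*out_len = len;
	return buf;
}
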
@@ -121,6 +236,49 @@ int rtw89_acpi_dsm_get_policy_6ghz_sp(struct rtw89_dev *rtwdev,
return 0;
}
+static bool chk_acpi_policy_tas_sig(const struct rtw89_acpi_policy_tas *p)
+{
+ return p->signature[0] == 0x52 &&
+ p->signature[1] == 0x54 &&
+ p->signature[2] == 0x4B &&
+ p->signature[3] == 0x05;
+}
+
+static int rtw89_acpi_dsm_get_policy_tas(struct rtw89_dev *rtwdev,
+ union acpi_object *obj,
+ struct rtw89_acpi_policy_tas **policy)
+{
+ const struct rtw89_acpi_policy_tas *ptr;
+ u32 buf_len;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ return -EINVAL;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len < sizeof(*ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ return -EINVAL;
+ }
+
+ ptr = (typeof(ptr))obj->buffer.pointer;
+ if (!chk_acpi_policy_tas_sig(ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__);
+ return -EINVAL;
+ }
+
+ *policy = kmemdup(ptr, sizeof(*ptr), GFP_KERNEL);
+ if (!*policy)
+ return -ENOMEM;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_tas: ", *policy,
+ sizeof(*ptr));
+ return 0;
+}
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res)
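
chk_acpi_policy_tas_sig() above compares the first four bytes against 0x52 0x54 0x4B 0x05 — ASCII 'R' 'T' 'K' followed by 0x05 (the patch does not spell out what the trailing byte encodes). An equivalent spelling that makes the vendor tag visible (a sketch, not the patch's code):

#include <stdbool.h>
#include <string.h>

static bool example_tas_sig_ok(const unsigned char sig[4])
{
	/* "RTK" vendor tag plus the expected trailing byte 0x05. */
	static const unsigned char expected[4] = { 'R', 'T', 'K', 0x05 };

	return memcmp(sig, expected, sizeof(expected)) == 0;
}
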
@@ -142,6 +300,8 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
else if (func == RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP)
ret = rtw89_acpi_dsm_get_policy_6ghz_sp(rtwdev, obj,
&res->u.policy_6ghz_sp);
+ else if (func == RTW89_ACPI_DSM_FUNC_TAS_EN)
+ ret = rtw89_acpi_dsm_get_policy_tas(rtwdev, obj, &res->u.policy_tas);
else
ret = rtw89_acpi_dsm_get_value(rtwdev, obj, &res->u.value);
@@ -152,46 +312,875 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
struct rtw89_acpi_rtag_result *res)
{
- struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_handle root, handle;
- union acpi_object *obj;
- acpi_status status;
+ const struct rtw89_acpi_data *data;
u32 buf_len;
int ret = 0;
- root = ACPI_HANDLE(rtwdev->dev);
- if (!root)
- return -EOPNOTSUPP;
-
- status = acpi_get_handle(root, (acpi_string)"RTAG", &handle);
- if (ACPI_FAILURE(status))
+ data = rtw89_acpi_evaluate_method(rtwdev, "RTAG");
+ if (!data)
return -EIO;
- status = acpi_evaluate_object(handle, NULL, NULL, &buf);
- if (ACPI_FAILURE(status))
- return -EIO;
+ buf_len = data->len;
+ if (buf_len != sizeof(*res)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ ret = -EINVAL;
+ goto out;
+ }
- obj = buf.pointer;
- if (obj->type != ACPI_TYPE_BUFFER) {
+ *res = *(struct rtw89_acpi_rtag_result *)data->buf;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res));
+
+out:
+ kfree(data);
+ return ret;
+}
+
+enum rtw89_acpi_sar_subband rtw89_acpi_sar_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq)
+{
+ switch (center_freq) {
+ default:
rtw89_debug(rtwdev, RTW89_DBG_ACPI,
- "acpi: expect buffer but type: %d\n", obj->type);
- ret = -EINVAL;
+ "center freq %u to ACPI SAR subband is unhandled\n",
+ center_freq);
+ fallthrough;
+ case 2412 ... 2484:
+ return RTW89_ACPI_SAR_2GHZ_SUBBAND;
+ case 5180 ... 5240:
+ return RTW89_ACPI_SAR_5GHZ_SUBBAND_1;
+ case 5250 ... 5320:
+ return RTW89_ACPI_SAR_5GHZ_SUBBAND_2;
+ case 5500 ... 5720:
+ return RTW89_ACPI_SAR_5GHZ_SUBBAND_2E;
+ case 5745 ... 5885:
+ return RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4;
+ case 5955 ... 6155:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L;
+ case 6175 ... 6415:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H;
+ case 6435 ... 6515:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_6;
+ case 6535 ... 6695:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L;
+ case 6715 ... 6855:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H;
+
+ /* freq 6875 (ch 185, 20MHz) spans RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H
+ * and RTW89_ACPI_SAR_6GHZ_SUBBAND_8, so directly describe it with
+ * struct rtw89_6ghz_span.
+ */
+
+ case 6895 ... 7115:
+ return RTW89_ACPI_SAR_6GHZ_SUBBAND_8;
+ }
+}
+
+enum rtw89_band rtw89_acpi_sar_subband_to_band(struct rtw89_dev *rtwdev,
+ enum rtw89_acpi_sar_subband subband)
+{
+ switch (subband) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "ACPI SAR subband %u to band is unhandled\n", subband);
+ fallthrough;
+ case RTW89_ACPI_SAR_2GHZ_SUBBAND:
+ return RTW89_BAND_2G;
+ case RTW89_ACPI_SAR_5GHZ_SUBBAND_1:
+ return RTW89_BAND_5G;
+ case RTW89_ACPI_SAR_5GHZ_SUBBAND_2:
+ return RTW89_BAND_5G;
+ case RTW89_ACPI_SAR_5GHZ_SUBBAND_2E:
+ return RTW89_BAND_5G;
+ case RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4:
+ return RTW89_BAND_5G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_6:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H:
+ return RTW89_BAND_6G;
+ case RTW89_ACPI_SAR_6GHZ_SUBBAND_8:
+ return RTW89_BAND_6G;
+ }
+}
+
+static u8 rtw89_acpi_sar_rfpath_to_hp_antidx(enum rtw89_rf_path rfpath)
+{
+ switch (rfpath) {
+ default:
+ case RF_PATH_B:
+ return 0;
+ case RF_PATH_A:
+ return 1;
+ }
+}
+
+static u8 rtw89_acpi_sar_rfpath_to_rt_antidx(enum rtw89_rf_path rfpath)
+{
+ switch (rfpath) {
+ default:
+ case RF_PATH_A:
+ return 0;
+ case RF_PATH_B:
+ return 1;
+ }
+}
+
+static s16 rtw89_acpi_sar_normalize_hp_val(u8 v)
+{
+ static const u8 bias = 10;
+ static const u8 fct = 1;
+ u16 res;
+
+ BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR);
+
+ res = (bias << TXPWR_FACTOR_OF_RTW89_ACPI_SAR) +
+ (v << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct));
+
+ return min_t(s32, res, MAX_VAL_OF_RTW89_ACPI_SAR);
+}
+
+static s16 rtw89_acpi_sar_normalize_rt_val(u8 v)
+{
+ static const u8 fct = 3;
+ u16 res;
+
+ BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR);
+
+ res = v << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct);
+
+ return min_t(s32, res, MAX_VAL_OF_RTW89_ACPI_SAR);
+}
+
+static
+void rtw89_acpi_sar_load_std_legacy(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_sar_std_legacy *ptr = content;
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = rec->rfpath_to_antidx(path);
+
+ if (subband < RTW89_ACPI_SAR_SUBBAND_NR_LEGACY)
+ ent->v[subband][path] =
+ rec->normalize(ptr->v[antidx][subband]);
+ else
+ ent->v[subband][path] = MAX_VAL_OF_RTW89_ACPI_SAR;
+ }
+ }
+}
+
+static
+void rtw89_acpi_sar_load_std_has_6ghz(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_sar_std_has_6ghz *ptr = content;
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+
+ BUILD_BUG_ON(RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ != NUM_OF_RTW89_ACPI_SAR_SUBBAND);
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = rec->rfpath_to_antidx(path);
+
+ ent->v[subband][path] = rec->normalize(ptr->v[antidx][subband]);
+ }
+ }
+}
+
+static
+void rtw89_acpi_sar_load_sml_legacy(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_sar_sml_legacy *ptr = content;
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = rec->rfpath_to_antidx(path);
+
+ if (subband < RTW89_ACPI_SAR_SUBBAND_NR_LEGACY)
+ ent->v[subband][path] =
+ rec->normalize(ptr->v[antidx][subband]);
+ else
+ ent->v[subband][path] = MAX_VAL_OF_RTW89_ACPI_SAR;
+ }
+ }
+}
+
+static
+void rtw89_acpi_sar_load_sml_has_6ghz(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_sar_sml_has_6ghz *ptr = content;
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+
+ BUILD_BUG_ON(RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ != NUM_OF_RTW89_ACPI_SAR_SUBBAND);
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = rec->rfpath_to_antidx(path);
+
+ ent->v[subband][path] = rec->normalize(ptr->v[antidx][subband]);
+ }
+ }
+}
+
+static s16 rtw89_acpi_geo_sar_normalize_delta(s8 delta)
+{
+ static const u8 fct = 1;
+
+ BUILD_BUG_ON(fct > TXPWR_FACTOR_OF_RTW89_ACPI_SAR);
+
+ return delta << (TXPWR_FACTOR_OF_RTW89_ACPI_SAR - fct);
+}
+
+static enum rtw89_acpi_geo_sar_regd_hp
+rtw89_acpi_geo_sar_regd_convert_hp_idx(enum rtw89_regulation_type regd)
+{
+ switch (regd) {
+ case RTW89_FCC:
+ case RTW89_IC:
+ case RTW89_NCC:
+ case RTW89_CHILE:
+ case RTW89_MEXICO:
+ return RTW89_ACPI_GEO_SAR_REGD_HP_FCC;
+ case RTW89_ETSI:
+ case RTW89_MKK:
+ case RTW89_ACMA:
+ return RTW89_ACPI_GEO_SAR_REGD_HP_ETSI;
+ default:
+ case RTW89_WW:
+ case RTW89_NA:
+ case RTW89_KCC:
+ return RTW89_ACPI_GEO_SAR_REGD_HP_WW;
+ }
+}
+
+static enum rtw89_acpi_geo_sar_regd_rt
+rtw89_acpi_geo_sar_regd_convert_rt_idx(enum rtw89_regulation_type regd)
+{
+ switch (regd) {
+ case RTW89_FCC:
+ case RTW89_NCC:
+ case RTW89_CHILE:
+ case RTW89_MEXICO:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_FCC;
+ case RTW89_ETSI:
+ case RTW89_ACMA:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_ETSI;
+ case RTW89_MKK:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_MKK;
+ case RTW89_IC:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_IC;
+ case RTW89_KCC:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_KCC;
+ default:
+ case RTW89_WW:
+ case RTW89_NA:
+ return RTW89_ACPI_GEO_SAR_REGD_RT_WW;
+ }
+}
+
+static
+void rtw89_acpi_geo_sar_load_by_hp(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_geo_sar_hp_val *ptr,
+ enum rtw89_rf_path path, s16 *val)
+{
+ u8 antidx = rtw89_acpi_sar_rfpath_to_hp_antidx(path);
+ s16 delta = rtw89_acpi_geo_sar_normalize_delta(ptr->delta[antidx]);
+ s16 max = rtw89_acpi_sar_normalize_hp_val(ptr->max);
+
+ *val = clamp_t(s32, (*val) + delta, MIN_VAL_OF_RTW89_ACPI_SAR, max);
+}
+
+static
+void rtw89_acpi_geo_sar_load_by_rt(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_geo_sar_rt_val *ptr,
+ s16 *val)
+{
+ s16 delta = rtw89_acpi_geo_sar_normalize_delta(ptr->delta);
+ s16 max = rtw89_acpi_sar_normalize_rt_val(ptr->max);
+
+ *val = clamp_t(s32, (*val) + delta, MIN_VAL_OF_RTW89_ACPI_SAR, max);
+}
+
+static
+void rtw89_acpi_geo_sar_load_hp_legacy(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_geo_sar_hp_legacy *ptr = content;
+ const struct rtw89_acpi_geo_sar_hp_legacy_entry *ptr_ent;
+ const struct rtw89_acpi_geo_sar_hp_val *ptr_ent_val;
+ enum rtw89_acpi_geo_sar_regd_hp geo_idx =
+ rtw89_acpi_geo_sar_regd_convert_hp_idx(regd);
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+ enum rtw89_band band;
+
+ ptr_ent = &ptr->entries[geo_idx];
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband);
+ switch (band) {
+ case RTW89_BAND_2G:
+ ptr_ent_val = &ptr_ent->val_2ghz;
+ break;
+ case RTW89_BAND_5G:
+ ptr_ent_val = &ptr_ent->val_5ghz;
+ break;
+ default:
+ case RTW89_BAND_6G:
+ ptr_ent_val = NULL;
+ break;
+ }
+
+ if (!ptr_ent_val)
+ continue;
+
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++)
+ rtw89_acpi_geo_sar_load_by_hp(rtwdev, ptr_ent_val, path,
+ &ent->v[subband][path]);
+ }
+}
+
+static
+void rtw89_acpi_geo_sar_load_hp_has_6ghz(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_geo_sar_hp_has_6ghz *ptr = content;
+ const struct rtw89_acpi_geo_sar_hp_has_6ghz_entry *ptr_ent;
+ const struct rtw89_acpi_geo_sar_hp_val *ptr_ent_val;
+ enum rtw89_acpi_geo_sar_regd_hp geo_idx =
+ rtw89_acpi_geo_sar_regd_convert_hp_idx(regd);
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+ enum rtw89_band band;
+
+ ptr_ent = &ptr->entries[geo_idx];
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband);
+ switch (band) {
+ case RTW89_BAND_2G:
+ ptr_ent_val = &ptr_ent->val_2ghz;
+ break;
+ case RTW89_BAND_5G:
+ ptr_ent_val = &ptr_ent->val_5ghz;
+ break;
+ case RTW89_BAND_6G:
+ ptr_ent_val = &ptr_ent->val_6ghz;
+ break;
+ default:
+ ptr_ent_val = NULL;
+ break;
+ }
+
+ if (!ptr_ent_val)
+ continue;
+
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++)
+ rtw89_acpi_geo_sar_load_by_hp(rtwdev, ptr_ent_val, path,
+ &ent->v[subband][path]);
+ }
+}
+
+static
+void rtw89_acpi_geo_sar_load_rt_legacy(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_geo_sar_rt_legacy *ptr = content;
+ const struct rtw89_acpi_geo_sar_rt_legacy_entry *ptr_ent;
+ const struct rtw89_acpi_geo_sar_rt_val *ptr_ent_val;
+ enum rtw89_acpi_geo_sar_regd_rt geo_idx =
+ rtw89_acpi_geo_sar_regd_convert_rt_idx(regd);
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+ enum rtw89_band band;
+
+ ptr_ent = &ptr->entries[geo_idx];
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband);
+ switch (band) {
+ case RTW89_BAND_2G:
+ ptr_ent_val = &ptr_ent->val_2ghz;
+ break;
+ case RTW89_BAND_5G:
+ ptr_ent_val = &ptr_ent->val_5ghz;
+ break;
+ default:
+ case RTW89_BAND_6G:
+ ptr_ent_val = NULL;
+ break;
+ }
+
+ if (!ptr_ent_val)
+ continue;
+
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++)
+ rtw89_acpi_geo_sar_load_by_rt(rtwdev, ptr_ent_val,
+ &ent->v[subband][path]);
+ }
+}
+
+static
+void rtw89_acpi_geo_sar_load_rt_has_6ghz(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent)
+{
+ const struct rtw89_acpi_geo_sar_rt_has_6ghz *ptr = content;
+ const struct rtw89_acpi_geo_sar_rt_has_6ghz_entry *ptr_ent;
+ const struct rtw89_acpi_geo_sar_rt_val *ptr_ent_val;
+ enum rtw89_acpi_geo_sar_regd_rt geo_idx =
+ rtw89_acpi_geo_sar_regd_convert_rt_idx(regd);
+ enum rtw89_acpi_sar_subband subband;
+ enum rtw89_rf_path path;
+ enum rtw89_band band;
+
+ ptr_ent = &ptr->entries[geo_idx];
+
+ for (subband = 0; subband < NUM_OF_RTW89_ACPI_SAR_SUBBAND; subband++) {
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband);
+ switch (band) {
+ case RTW89_BAND_2G:
+ ptr_ent_val = &ptr_ent->val_2ghz;
+ break;
+ case RTW89_BAND_5G:
+ ptr_ent_val = &ptr_ent->val_5ghz;
+ break;
+ case RTW89_BAND_6G:
+ ptr_ent_val = &ptr_ent->val_6ghz;
+ break;
+ default:
+ ptr_ent_val = NULL;
+ break;
+ }
+
+ if (!ptr_ent_val)
+ continue;
+
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++)
+ rtw89_acpi_geo_sar_load_by_rt(rtwdev, ptr_ent_val,
+ &ent->v[subband][path]);
+ }
+}
+
+#define RTW89_ACPI_GEO_SAR_DECL_HANDLER(type) \
+static const struct rtw89_acpi_geo_sar_handler \
+rtw89_acpi_geo_sar_handler_ ## type = { \
+ .data_size = RTW89_ACPI_GEO_SAR_SIZE_OF(type), \
+ .load = rtw89_acpi_geo_sar_load_ ## type, \
+}
+
+RTW89_ACPI_GEO_SAR_DECL_HANDLER(hp_legacy);
+RTW89_ACPI_GEO_SAR_DECL_HANDLER(hp_has_6ghz);
+RTW89_ACPI_GEO_SAR_DECL_HANDLER(rt_legacy);
+RTW89_ACPI_GEO_SAR_DECL_HANDLER(rt_has_6ghz);
+
+static const struct rtw89_acpi_sar_recognition rtw89_acpi_sar_recs[] = {
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_HP,
+ .rev = RTW89_ACPI_SAR_REV_LEGACY,
+ .size = RTW89_ACPI_SAR_SIZE_OF(std_legacy),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_hp_legacy,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_hp_antidx,
+ .normalize = rtw89_acpi_sar_normalize_hp_val,
+ .load = rtw89_acpi_sar_load_std_legacy,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_HP,
+ .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ,
+ .size = RTW89_ACPI_SAR_SIZE_OF(std_has_6ghz),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_hp_has_6ghz,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_hp_antidx,
+ .normalize = rtw89_acpi_sar_normalize_hp_val,
+ .load = rtw89_acpi_sar_load_std_has_6ghz,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_RT,
+ .rev = RTW89_ACPI_SAR_REV_LEGACY,
+ .size = RTW89_ACPI_SAR_SIZE_OF(std_legacy),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_rt_legacy,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx,
+ .normalize = rtw89_acpi_sar_normalize_rt_val,
+ .load = rtw89_acpi_sar_load_std_legacy,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_RT,
+ .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ,
+ .size = RTW89_ACPI_SAR_SIZE_OF(std_has_6ghz),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_rt_has_6ghz,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx,
+ .normalize = rtw89_acpi_sar_normalize_rt_val,
+ .load = rtw89_acpi_sar_load_std_has_6ghz,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_RT,
+ .rev = RTW89_ACPI_SAR_REV_LEGACY,
+ .size = RTW89_ACPI_SAR_SIZE_OF(sml_legacy),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_rt_legacy,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx,
+ .normalize = rtw89_acpi_sar_normalize_rt_val,
+ .load = rtw89_acpi_sar_load_sml_legacy,
+ },
+ {
+ .id = {
+ .cid = RTW89_ACPI_SAR_CID_RT,
+ .rev = RTW89_ACPI_SAR_REV_HAS_6GHZ,
+ .size = RTW89_ACPI_SAR_SIZE_OF(sml_has_6ghz),
+ },
+ .geo = &rtw89_acpi_geo_sar_handler_rt_has_6ghz,
+
+ .rfpath_to_antidx = rtw89_acpi_sar_rfpath_to_rt_antidx,
+ .normalize = rtw89_acpi_sar_normalize_rt_val,
+ .load = rtw89_acpi_sar_load_sml_has_6ghz,
+ },
+};
+
+struct rtw89_acpi_sar_rec_parm {
+ u32 pld_len;
+ u8 tbl_cnt;
+ u16 cid;
+ u8 rev;
+};
+
+static const struct rtw89_acpi_sar_recognition *
+rtw89_acpi_sar_recognize(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_rec_parm *parm)
+{
+ const u32 tbl_len = parm->pld_len / parm->tbl_cnt;
+ const struct rtw89_acpi_sar_recognition *rec;
+ struct rtw89_acpi_sar_identifier id = {};
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "%s: cid %u, rev %u, tbl len %u, tbl cnt %u\n",
+ __func__, parm->cid, parm->rev, tbl_len, parm->tbl_cnt);
+
+ if (unlikely(parm->pld_len % parm->tbl_cnt)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid pld len %u\n",
+ parm->pld_len);
+ return NULL;
+ }
+
+ if (unlikely(tbl_len > RTW89_ACPI_SAR_SIZE_MAX)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid tbl len %u\n",
+ tbl_len);
+ return NULL;
+ }
+
+ if (unlikely(parm->tbl_cnt > MAX_NUM_OF_RTW89_ACPI_SAR_TBL)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid tbl cnt %u\n",
+ parm->tbl_cnt);
+ return NULL;
+ }
+
+ switch (parm->cid) {
+ case RTW89_ACPI_SAR_CID_HP:
+ case RTW89_ACPI_SAR_CID_RT:
+ id.cid = parm->cid;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid cid 0x%x\n",
+ parm->cid);
+ return NULL;
+ }
+
+ switch (parm->rev) {
+ case RTW89_ACPI_SAR_REV_LEGACY:
+ case RTW89_ACPI_SAR_REV_HAS_6GHZ:
+ id.rev = parm->rev;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid rev %u\n",
+ parm->rev);
+ return NULL;
+ }
+
+ id.size = tbl_len;
+ for (unsigned int i = 0; i < ARRAY_SIZE(rtw89_acpi_sar_recs); i++) {
+ rec = &rtw89_acpi_sar_recs[i];
+ if (memcmp(&rec->id, &id, sizeof(rec->id)) == 0)
+ return rec;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "failed to recognize\n");
+ return NULL;
+}
+
+static const struct rtw89_acpi_sar_recognition *
+rtw89_acpi_evaluate_static_sar(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg)
+{
+ const struct rtw89_acpi_sar_recognition *rec = NULL;
+ const struct rtw89_acpi_static_sar_hdr *hdr;
+ struct rtw89_sar_entry_from_acpi tmp = {};
+ struct rtw89_acpi_sar_rec_parm parm = {};
+ struct rtw89_sar_table_from_acpi *tbl;
+ const struct rtw89_acpi_data *data;
+ u32 len;
+
+ data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_STATIC_SAR);
+ if (!data)
+ return NULL;
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load static sar\n");
+
+ len = data->len;
+ if (len <= sizeof(*hdr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len);
goto out;
}
- buf_len = obj->buffer.length;
- if (buf_len != sizeof(*res)) {
- rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
- __func__, buf_len);
+ hdr = (typeof(hdr))data->buf;
+
+ parm.cid = le16_to_cpu(hdr->cid);
+ parm.rev = hdr->rev;
+ parm.tbl_cnt = 1;
+ parm.pld_len = len - sizeof(*hdr);
+
+ rec = rtw89_acpi_sar_recognize(rtwdev, &parm);
+ if (!rec)
+ goto out;
+
+ rec->load(rtwdev, rec, hdr->content, &tmp);
+
+ tbl = &cfg->tables[0];
+ for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++)
+ tbl->entries[regd] = tmp;
+
+ cfg->valid_num = 1;
+
+out:
+ kfree(data);
+ return rec;
+}
+
+static const struct rtw89_acpi_sar_recognition *
+rtw89_acpi_evaluate_dynamic_sar(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg)
+{
+ const struct rtw89_acpi_sar_recognition *rec = NULL;
+ const struct rtw89_acpi_dynamic_sar_hdr *hdr;
+ struct rtw89_acpi_sar_rec_parm parm = {};
+ struct rtw89_sar_table_from_acpi *tbl;
+ const struct rtw89_acpi_data *data;
+ u32 len;
+
+ data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_DYNAMIC_SAR);
+ if (!data)
+ return NULL;
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load dynamic sar\n");
+
+ len = data->len;
+ if (len <= sizeof(*hdr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len);
+ goto out;
+ }
+
+ hdr = (typeof(hdr))data->buf;
+
+ parm.cid = le16_to_cpu(hdr->cid);
+ parm.rev = hdr->rev;
+ parm.tbl_cnt = hdr->cnt;
+ parm.pld_len = len - sizeof(*hdr);
+
+ rec = rtw89_acpi_sar_recognize(rtwdev, &parm);
+ if (!rec)
+ goto out;
+
+ for (unsigned int i = 0; i < hdr->cnt; i++) {
+ const u8 *content = hdr->content + rec->id.size * i;
+ struct rtw89_sar_entry_from_acpi tmp = {};
+
+ rec->load(rtwdev, rec, content, &tmp);
+
+ tbl = &cfg->tables[i];
+ for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++)
+ tbl->entries[regd] = tmp;
+ }
+
+ cfg->valid_num = hdr->cnt;
+
+out:
+ kfree(data);
+ return rec;
+}
+
+int rtw89_acpi_evaluate_dynamic_sar_indicator(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg,
+ bool *poll_changed)
+{
+ struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator;
+ struct rtw89_sar_indicator_from_acpi tmp = *ind;
+ const struct rtw89_acpi_data *data;
+ const u8 *tbl_base1_by_ant;
+ enum rtw89_rf_path path;
+ int ret = 0;
+ u32 len;
+
+ data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_DYNAMIC_SAR_INDICATOR);
+ if (!data)
+ return -EFAULT;
+
+ if (!poll_changed)
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load dynamic sar indicator\n");
+
+ len = data->len;
+ if (len != ind->fields) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u\n", len);
ret = -EINVAL;
goto out;
}
- *res = *(struct rtw89_acpi_rtag_result *)obj->buffer.pointer;
+ tbl_base1_by_ant = data->buf;
- rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res));
+ for (path = 0; path < NUM_OF_RTW89_ACPI_SAR_RF_PATH; path++) {
+ u8 antidx = ind->rfpath_to_antidx(path);
+ u8 sel;
+
+ if (antidx >= ind->fields)
+ antidx = 0;
+
+ /* convert the table index from 1-based to 0-based */
+ sel = tbl_base1_by_ant[antidx] - 1;
+ if (sel >= cfg->valid_num)
+ sel = 0;
+
+ tmp.tblsel[path] = sel;
+ }
+
+ if (memcmp(ind, &tmp, sizeof(*ind)) == 0) {
+ if (poll_changed)
+ *poll_changed = false;
+ } else {
+ if (poll_changed)
+ *poll_changed = true;
+
+ *ind = tmp;
+ }
out:
- ACPI_FREE(obj);
+ kfree(data);
return ret;
}
+
+static
+void rtw89_acpi_evaluate_geo_sar(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_geo_sar_handler *hdl,
+ struct rtw89_sar_cfg_acpi *cfg)
+{
+ const struct rtw89_acpi_data *data;
+ u32 len;
+
+ data = rtw89_acpi_evaluate_method(rtwdev, RTW89_ACPI_METHOD_GEO_SAR);
+ if (!data)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "acpi load geo sar\n");
+
+ len = data->len;
+ if (len != hdl->data_size) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "invalid buf len %u (expected %u)\n",
+ len, hdl->data_size);
+ goto out;
+ }
+
+ for (unsigned int i = 0; i < cfg->valid_num; i++)
+ for (u8 regd = 0; regd < RTW89_REGD_NUM; regd++)
+ hdl->load(rtwdev, data->buf, regd, &cfg->tables[i].entries[regd]);
+
+out:
+ kfree(data);
+}
+
+int rtw89_acpi_evaluate_sar(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg)
+{
+ struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator;
+ const struct rtw89_acpi_sar_recognition *rec;
+ bool fetch_indicator = false;
+ int ret;
+
+ rec = rtw89_acpi_evaluate_static_sar(rtwdev, cfg);
+ if (rec)
+ goto recognized;
+
+ rec = rtw89_acpi_evaluate_dynamic_sar(rtwdev, cfg);
+ if (!rec)
+ return -ENOENT;
+
+ fetch_indicator = true;
+
+recognized:
+ rtw89_acpi_evaluate_geo_sar(rtwdev, rec->geo, cfg);
+
+ switch (rec->id.cid) {
+ case RTW89_ACPI_SAR_CID_HP:
+ cfg->downgrade_2tx = 3 << TXPWR_FACTOR_OF_RTW89_ACPI_SAR;
+ ind->fields = RTW89_ACPI_SAR_ANT_NR_STD;
+ break;
+ case RTW89_ACPI_SAR_CID_RT:
+ cfg->downgrade_2tx = 0;
+ ind->fields = 1;
+ break;
+ default:
+ return -EFAULT;
+ }
+
+ if (fetch_indicator) {
+ ind->rfpath_to_antidx = rec->rfpath_to_antidx;
+ ret = rtw89_acpi_evaluate_dynamic_sar_indicator(rtwdev, cfg, NULL);
+ if (ret)
+ fetch_indicator = false;
+ }
+
+ if (!fetch_indicator)
+ memset(ind->tblsel, 0, sizeof(ind->tblsel));
+
+ ind->enable_sync = fetch_indicator;
+ return 0;
+}
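
The normalize helpers added above convert raw SAR bytes into the driver's fixed-point TX-power unit: HP tables carry 0.5 dB steps (fct = 1) plus a 10 dB bias, RT tables carry 1/8 dB steps (fct = 3), and both are shifted up to TXPWR_FACTOR_OF_RTW89_ACPI_SAR fractional bits and clamped to MAX_VAL_OF_RTW89_ACPI_SAR. A worked example, assuming a factor of 3, i.e. results in 1/8 dB units — the real constant lives in rtw89 headers not shown here, and the clamp is omitted for brevity:

#define EXAMPLE_TXPWR_FACTOR	3	/* assumed; results in 1/8 dB units */

static short example_normalize_hp(unsigned char v)	/* v in 0.5 dB steps */
{
	return (10 << EXAMPLE_TXPWR_FACTOR) +
	       (v << (EXAMPLE_TXPWR_FACTOR - 1));
}

static short example_normalize_rt(unsigned char v)	/* v in 1/8 dB steps */
{
	return v << (EXAMPLE_TXPWR_FACTOR - 3);
}

/*
 * example_normalize_hp(16) = (10 << 3) + (16 << 2) = 80 + 64 = 144,
 * i.e. 10 dB bias + 16 * 0.5 dB = 18 dB expressed in 1/8 dB units.
 * example_normalize_rt(96)  = 96, i.e. 96 * 1/8 dB = 12 dB unchanged.
 */
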
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index b43ab106e44d..8c918ee02d2e 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -7,6 +7,11 @@
#include "core.h"
+struct rtw89_acpi_data {
+ u32 len;
+ u8 buf[] __counted_by(len);
+};
+
enum rtw89_acpi_dsm_func {
RTW89_ACPI_DSM_FUNC_IDN_BAND_SUP = 2,
RTW89_ACPI_DSM_FUNC_6G_DIS = 3,
@@ -26,6 +31,13 @@ enum rtw89_acpi_policy_mode {
RTW89_ACPI_POLICY_ALLOW = 1,
};
+enum rtw89_acpi_conf_tas {
+ RTW89_ACPI_CONF_TAS_US = BIT(0),
+ RTW89_ACPI_CONF_TAS_CA = BIT(1),
+ RTW89_ACPI_CONF_TAS_KR = BIT(2),
+ RTW89_ACPI_CONF_TAS_OTHERS = BIT(7),
+};
+
struct rtw89_acpi_country_code {
/* below are allowed:
* * ISO alpha2 country code
@@ -54,12 +66,21 @@ struct rtw89_acpi_policy_6ghz_sp {
u8 rsvd;
} __packed;
+struct rtw89_acpi_policy_tas {
+ u8 signature[4];
+ u8 revision;
+ u8 enable;
+ u8 enabled_countries;
+ u8 rsvd[3];
+} __packed;
+
struct rtw89_acpi_dsm_result {
union {
u8 value;
/* caller needs to free it after using */
struct rtw89_acpi_policy_6ghz *policy_6ghz;
struct rtw89_acpi_policy_6ghz_sp *policy_6ghz_sp;
+ struct rtw89_acpi_policy_tas *policy_tas;
} u;
};
@@ -70,10 +91,179 @@ struct rtw89_acpi_rtag_result {
u8 ant_gain_table[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
} __packed;
+enum rtw89_acpi_sar_cid {
+ RTW89_ACPI_SAR_CID_HP = 0x5048,
+ RTW89_ACPI_SAR_CID_RT = 0x5452,
+};
+
+enum rtw89_acpi_sar_rev {
+ RTW89_ACPI_SAR_REV_LEGACY = 1,
+ RTW89_ACPI_SAR_REV_HAS_6GHZ = 2,
+};
+
+#define RTW89_ACPI_SAR_ANT_NR_STD 4
+#define RTW89_ACPI_SAR_ANT_NR_SML 2
+
+#define RTW89_ACPI_METHOD_STATIC_SAR "WRDS"
+#define RTW89_ACPI_METHOD_DYNAMIC_SAR "RWRD"
+#define RTW89_ACPI_METHOD_DYNAMIC_SAR_INDICATOR "RWSI"
+#define RTW89_ACPI_METHOD_GEO_SAR "RWGS"
+
+struct rtw89_acpi_sar_std_legacy {
+ u8 v[RTW89_ACPI_SAR_ANT_NR_STD][RTW89_ACPI_SAR_SUBBAND_NR_LEGACY];
+} __packed;
+
+struct rtw89_acpi_sar_std_has_6ghz {
+ u8 v[RTW89_ACPI_SAR_ANT_NR_STD][RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ];
+} __packed;
+
+struct rtw89_acpi_sar_sml_legacy {
+ u8 v[RTW89_ACPI_SAR_ANT_NR_SML][RTW89_ACPI_SAR_SUBBAND_NR_LEGACY];
+} __packed;
+
+struct rtw89_acpi_sar_sml_has_6ghz {
+ u8 v[RTW89_ACPI_SAR_ANT_NR_SML][RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ];
+} __packed;
+
+struct rtw89_acpi_static_sar_hdr {
+ __le16 cid;
+ u8 rev;
+ u8 content[];
+} __packed;
+
+struct rtw89_acpi_dynamic_sar_hdr {
+ __le16 cid;
+ u8 rev;
+ u8 cnt;
+ u8 content[];
+} __packed;
+
+struct rtw89_acpi_sar_identifier {
+ enum rtw89_acpi_sar_cid cid;
+ enum rtw89_acpi_sar_rev rev;
+ u8 size;
+};
+
+/* for rtw89_acpi_sar_identifier::size */
+#define RTW89_ACPI_SAR_SIZE_MAX U8_MAX
+#define RTW89_ACPI_SAR_SIZE_OF(type) \
+ (BUILD_BUG_ON_ZERO(sizeof(struct rtw89_acpi_sar_ ## type) > \
+ RTW89_ACPI_SAR_SIZE_MAX) + \
+ sizeof(struct rtw89_acpi_sar_ ## type))
+
+struct rtw89_acpi_sar_recognition {
+ struct rtw89_acpi_sar_identifier id;
+ const struct rtw89_acpi_geo_sar_handler *geo;
+
+ u8 (*rfpath_to_antidx)(enum rtw89_rf_path rfpath);
+ s16 (*normalize)(u8 v);
+ void (*load)(struct rtw89_dev *rtwdev,
+ const struct rtw89_acpi_sar_recognition *rec,
+ const void *content,
+ struct rtw89_sar_entry_from_acpi *ent);
+};
+
+struct rtw89_acpi_geo_sar_hp_val {
+ u8 max;
+ s8 delta[RTW89_ACPI_SAR_ANT_NR_STD];
+} __packed;
+
+struct rtw89_acpi_geo_sar_hp_legacy_entry {
+ struct rtw89_acpi_geo_sar_hp_val val_2ghz;
+ struct rtw89_acpi_geo_sar_hp_val val_5ghz;
+} __packed;
+
+struct rtw89_acpi_geo_sar_hp_has_6ghz_entry {
+ struct rtw89_acpi_geo_sar_hp_val val_2ghz;
+ struct rtw89_acpi_geo_sar_hp_val val_5ghz;
+ struct rtw89_acpi_geo_sar_hp_val val_6ghz;
+} __packed;
+
+enum rtw89_acpi_geo_sar_regd_hp {
+ RTW89_ACPI_GEO_SAR_REGD_HP_FCC = 0,
+ RTW89_ACPI_GEO_SAR_REGD_HP_ETSI = 1,
+ RTW89_ACPI_GEO_SAR_REGD_HP_WW = 2,
+
+ RTW89_ACPI_GEO_SAR_REGD_NR_HP,
+};
+
+struct rtw89_acpi_geo_sar_hp_legacy {
+ struct rtw89_acpi_geo_sar_hp_legacy_entry
+ entries[RTW89_ACPI_GEO_SAR_REGD_NR_HP];
+} __packed;
+
+struct rtw89_acpi_geo_sar_hp_has_6ghz {
+ struct rtw89_acpi_geo_sar_hp_has_6ghz_entry
+ entries[RTW89_ACPI_GEO_SAR_REGD_NR_HP];
+} __packed;
+
+struct rtw89_acpi_geo_sar_rt_val {
+ u8 max;
+ s8 delta;
+} __packed;
+
+struct rtw89_acpi_geo_sar_rt_legacy_entry {
+ struct rtw89_acpi_geo_sar_rt_val val_2ghz;
+ struct rtw89_acpi_geo_sar_rt_val val_5ghz;
+} __packed;
+
+struct rtw89_acpi_geo_sar_rt_has_6ghz_entry {
+ struct rtw89_acpi_geo_sar_rt_val val_2ghz;
+ struct rtw89_acpi_geo_sar_rt_val val_5ghz;
+ struct rtw89_acpi_geo_sar_rt_val val_6ghz;
+} __packed;
+
+enum rtw89_acpi_geo_sar_regd_rt {
+ RTW89_ACPI_GEO_SAR_REGD_RT_FCC = 0,
+ RTW89_ACPI_GEO_SAR_REGD_RT_ETSI = 1,
+ RTW89_ACPI_GEO_SAR_REGD_RT_MKK = 2,
+ RTW89_ACPI_GEO_SAR_REGD_RT_IC = 3,
+ RTW89_ACPI_GEO_SAR_REGD_RT_KCC = 4,
+ RTW89_ACPI_GEO_SAR_REGD_RT_WW = 5,
+
+ RTW89_ACPI_GEO_SAR_REGD_NR_RT,
+};
+
+struct rtw89_acpi_geo_sar_rt_legacy {
+ struct rtw89_acpi_geo_sar_rt_legacy_entry
+ entries[RTW89_ACPI_GEO_SAR_REGD_NR_RT];
+} __packed;
+
+struct rtw89_acpi_geo_sar_rt_has_6ghz {
+ struct rtw89_acpi_geo_sar_rt_has_6ghz_entry
+ entries[RTW89_ACPI_GEO_SAR_REGD_NR_RT];
+} __packed;
+
+struct rtw89_acpi_geo_sar_handler {
+ u8 data_size;
+
+ void (*load)(struct rtw89_dev *rtwdev,
+ const void *content,
+ enum rtw89_regulation_type regd,
+ struct rtw89_sar_entry_from_acpi *ent);
+};
+
+/* for rtw89_acpi_geo_sar_handler::data_size */
+#define RTW89_ACPI_GEO_SAR_SIZE_MAX U8_MAX
+#define RTW89_ACPI_GEO_SAR_SIZE_OF(type) \
+ (BUILD_BUG_ON_ZERO(sizeof(struct rtw89_acpi_geo_sar_ ## type) > \
+ RTW89_ACPI_GEO_SAR_SIZE_MAX) + \
+ sizeof(struct rtw89_acpi_geo_sar_ ## type))
+
+enum rtw89_acpi_sar_subband rtw89_acpi_sar_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq);
+enum rtw89_band rtw89_acpi_sar_subband_to_band(struct rtw89_dev *rtwdev,
+ enum rtw89_acpi_sar_subband subband);
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res);
int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
struct rtw89_acpi_rtag_result *res);
+int rtw89_acpi_evaluate_sar(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg);
+int rtw89_acpi_evaluate_dynamic_sar_indicator(struct rtw89_dev *rtwdev,
+ struct rtw89_sar_cfg_acpi *cfg,
+ bool *changed);
#endif
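
struct rtw89_acpi_data added above pairs a flexible array with __counted_by(len), letting FORTIFY_SOURCE/UBSAN bounds-check accesses to buf against the runtime length, while callers size the allocation with struct_size() as rtw89_acpi_evaluate_method() does. A minimal allocation sketch for such a struct (names are placeholders):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_data {
	u32 len;
	u8 buf[] __counted_by(len);
};

static struct example_data *example_data_alloc(const u8 *src, u32 len)
{
	/*
	 * struct_size() computes sizeof(header) + len * sizeof(u8) and
	 * saturates instead of overflowing on absurd lengths.
	 */
	struct example_data *d = kzalloc(struct_size(d, buf, len), GFP_KERNEL);

	if (!d)
		return NULL;

	d->len = len;	/* set the counter before touching buf */
	memcpy(d->buf, src, len);
	return d;
}
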
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index eca3d767ff60..385a238fe5cc 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -6,6 +6,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "ps.h"
static struct sk_buff *
rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev,
@@ -469,11 +470,17 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
bool ext_key = false;
int ret;
+ if (ieee80211_vif_is_mld(vif) && !chip->hw_mlo_bmc_crypto &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
+ rtw89_leave_ips_by_hwflags(rtwdev);
hw_key_type = RTW89_SEC_KEY_TYPE_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
+ rtw89_leave_ips_by_hwflags(rtwdev);
hw_key_type = RTW89_SEC_KEY_TYPE_WEP104;
break;
case WLAN_CIPHER_SUITE_TKIP:
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index f60e93870b09..806f42429a29 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -189,9 +189,10 @@ void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
}
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_chanctx_idx idx,
+ struct rtw89_vif_link *rtwvif_link,
const struct cfg80211_chan_def *chandef)
{
+ enum rtw89_chanctx_idx idx = rtwvif_link->chanctx_idx;
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_chanctx_idx cur;
@@ -205,6 +206,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
}
hal->roc_chandef = *chandef;
+ hal->roc_link_index = rtw89_vif_link_inst_get_index(rtwvif_link);
} else {
cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx,
RTW89_CHANCTX_IDLE);
@@ -339,11 +341,10 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx != RTW89_CHANCTX_IDLE) {
- /* ROC is ongoing (given ROC runs on RTW89_ROC_BY_LINK_INDEX).
- * If @link_index is the same as RTW89_ROC_BY_LINK_INDEX, get
- * the ongoing ROC chanctx.
+ /* ROC is ongoing (given ROC runs on @hal->roc_link_index).
+ * If @link_index is the same, get the ongoing ROC chanctx.
*/
- if (link_index == RTW89_ROC_BY_LINK_INDEX)
+ if (link_index == hal->roc_link_index)
chanctx_idx = roc_idx;
}
@@ -358,12 +359,41 @@ dflt:
}
EXPORT_SYMBOL(__rtw89_mgnt_chan_get);
+static enum rtw89_mlo_dbcc_mode
+rtw89_entity_sel_mlo_dbcc_mode(struct rtw89_dev *rtwdev, u8 active_hws)
+{
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_BE)
+ return MLO_DBCC_NOT_SUPPORT;
+
+ switch (active_hws) {
+ case BIT(0):
+ return MLO_2_PLUS_0_1RF;
+ case BIT(1):
+ return MLO_0_PLUS_2_1RF;
+ case BIT(0) | BIT(1):
+ default:
+ return MLO_1_PLUS_1_1RF;
+ }
+}
+
+static
+void rtw89_entity_recalc_mlo_dbcc_mode(struct rtw89_dev *rtwdev, u8 active_hws)
+{
+ enum rtw89_mlo_dbcc_mode mode;
+
+ mode = rtw89_entity_sel_mlo_dbcc_mode(rtwdev, active_hws);
+ rtwdev->mlo_dbcc_mode = mode;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "recalc mlo dbcc mode to %d\n", mode);
+}
+
static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
struct rtw89_vif_link *link;
struct rtw89_vif *role;
+ u8 active_hws = 0;
u8 pos = 0;
int i, j;
@@ -412,10 +442,13 @@ fill:
continue;
mgnt->chanctx_tbl[pos][i] = link->chanctx_idx;
+ active_hws |= BIT(i);
}
mgnt->active_roles[pos++] = role;
}
+
+ rtw89_entity_recalc_mlo_dbcc_mode(rtwdev, active_hws);
}
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
@@ -557,7 +590,9 @@ static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
u64 sync_tsf = READ_ONCE(rtwvif_link->sync_bcn_tsf);
u32 remainder;
- if (tsf < sync_tsf) {
+ if (role->is_go) {
+ sync_tsf = 0;
+ } else if (tsf < sync_tsf) {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC get tbtt ofst: tsf might not update yet\n");
sync_tsf = 0;
@@ -691,19 +726,13 @@ static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_vif *target = mcc_role->rtwvif_link->rtwvif;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
- struct rtw89_dev *rtwdev = rtwsta->rtwdev;
- struct rtw89_sta_link *rtwsta_link;
+ u8 macid;
if (rtwvif != target)
return;
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
- if (unlikely(!rtwsta_link)) {
- rtw89_err(rtwdev, "mcc sta macid: find no link on HW-0\n");
- return;
- }
-
- rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwsta_link->mac_id);
+ macid = rtw89_sta_get_main_macid(rtwsta);
+ rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, macid);
}
static void rtw89_mcc_fill_role_macid_bitmap(struct rtw89_dev *rtwdev,
@@ -747,9 +776,11 @@ static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
int ret;
int i;
- if (!mcc_role->is_go && !mcc_role->is_gc)
+ if (!mcc_role->is_gc)
return;
+ rtw89_p2p_noa_once_recalc(rtwvif_link);
+
rcu_read_lock();
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
@@ -785,6 +816,9 @@ fill:
}
tsf_lmt = (tsf & GENMASK_ULL(63, 32)) | start_time;
+ if (tsf_lmt < tsf)
+ tsf_lmt += roundup_u64(tsf - tsf_lmt, interval);
+
max_toa_us = rtw89_mcc_get_tbtt_ofst(rtwdev, mcc_role, tsf_lmt);
max_dur_us = interval - duration;
max_tob_us = max_dur_us - max_toa_us;
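
The new guard above handles a reconstructed TSF limit that lands in the past: grafting start_time onto the upper TSF bits can yield a value behind the current TSF, so it is advanced by a whole number of beacon intervals until it sits at or ahead of now, preserving TBTT alignment. A worked example with small numbers (the real values are presumably 64-bit TSF microseconds):

/* Round @x up to the next multiple of @align (align > 0). */
static unsigned long long example_roundup(unsigned long long x,
					  unsigned long long align)
{
	return ((x + align - 1) / align) * align;
}

/*
 * tsf = 1000, tsf_lmt = 340, interval = 200:
 *   delta = 660, example_roundup(660, 200) = 800,
 *   tsf_lmt becomes 340 + 800 = 1140 >= 1000,
 * and 1140 - 340 = 800 is a multiple of the interval, so the limit
 * still falls on a beacon boundary of the original schedule.
 */
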
@@ -929,6 +963,15 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
return 0;
}
+static bool rtw89_mcc_can_courtesy(const struct rtw89_mcc_role *provider,
+ const struct rtw89_mcc_role *receiver)
+{
+ if (provider->is_go || receiver->is_gc)
+ return false;
+
+ return true;
+}
+
static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev,
const struct rtw89_mcc_pattern *new)
{
@@ -937,37 +980,44 @@ static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_courtesy_cfg *crtz;
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC assign pattern: ref {%d | %d}, aux {%d | %d}\n",
new->tob_ref, new->toa_ref, new->tob_aux, new->toa_aux);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC pattern plan: %d\n", new->plan);
+
*pattern = *new;
memset(&pattern->courtesy, 0, sizeof(pattern->courtesy));
- if (pattern->tob_aux <= 0 || pattern->toa_aux <= 0) {
- pattern->courtesy.macid_tgt = aux->rtwvif_link->mac_id;
- pattern->courtesy.macid_src = ref->rtwvif_link->mac_id;
- pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
- pattern->courtesy.enable = true;
- } else if (pattern->tob_ref <= 0 || pattern->toa_ref <= 0) {
- pattern->courtesy.macid_tgt = ref->rtwvif_link->mac_id;
- pattern->courtesy.macid_src = aux->rtwvif_link->mac_id;
- pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
- pattern->courtesy.enable = true;
+ if (RTW89_MCC_REQ_COURTESY(pattern, aux) && rtw89_mcc_can_courtesy(ref, aux)) {
+ crtz = &pattern->courtesy.ref;
+ ref->crtz = crtz;
+
+ crtz->macid_tgt = aux->rtwvif_link->mac_id;
+ crtz->slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC courtesy ref: tgt %d, slot %d\n",
+ crtz->macid_tgt, crtz->slot_num);
+ } else {
+ ref->crtz = NULL;
}
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC pattern flags: plan %d, courtesy_en %d\n",
- pattern->plan, pattern->courtesy.enable);
+ if (RTW89_MCC_REQ_COURTESY(pattern, ref) && rtw89_mcc_can_courtesy(aux, ref)) {
+ crtz = &pattern->courtesy.aux;
+ aux->crtz = crtz;
- if (!pattern->courtesy.enable)
- return;
+ crtz->macid_tgt = ref->rtwvif_link->mac_id;
+ crtz->slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC pattern courtesy: tgt %d, src %d, slot %d\n",
- pattern->courtesy.macid_tgt, pattern->courtesy.macid_src,
- pattern->courtesy.slot_num);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC courtesy aux: tgt %d, slot %d\n",
+ crtz->macid_tgt, crtz->slot_num);
+ } else {
+ aux->crtz = NULL;
+ }
}
/* The follow-up roughly shows the relationship between the parameters
@@ -992,6 +1042,7 @@ static void __rtw89_mcc_calc_pattern_loose(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
+ u16 mcc_intvl = config->mcc_interval;
u16 bcn_ofst = config->beacon_offset;
u16 bt_dur_in_mid = 0;
u16 max_bcn_ofst;
@@ -1025,7 +1076,7 @@ calc:
res = bcn_ofst - bt_dur_in_mid;
upper = min_t(s16, ref->duration, res);
- lower = 0;
+ lower = max_t(s16, 0, ref->duration - (mcc_intvl - bcn_ofst));
if (ref->limit.enable) {
upper = min_t(s16, upper, ref->limit.max_toa);
@@ -1136,6 +1187,107 @@ static int __rtw89_mcc_calc_pattern_strict(struct rtw89_dev *rtwdev,
return 0;
}
+static void __rtw89_mcc_fill_ptrn_anchor_ref(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_pattern *ptrn,
+ bool small_bcn_ofst)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ u16 bcn_ofst = config->beacon_offset;
+ u16 ref_tob;
+ u16 ref_toa;
+
+ if (ref->limit.enable) {
+ ref_tob = ref->limit.max_tob;
+ ref_toa = ref->limit.max_toa;
+ } else {
+ ref_tob = ref->duration / 2;
+ ref_toa = ref->duration / 2;
+ }
+
+ if (small_bcn_ofst) {
+ ptrn->toa_ref = ref_toa;
+ ptrn->tob_ref = ref->duration - ptrn->toa_ref;
+ } else {
+ ptrn->tob_ref = ref_tob;
+ ptrn->toa_ref = ref->duration - ptrn->tob_ref;
+ }
+
+ ptrn->tob_aux = bcn_ofst - ptrn->toa_ref;
+ ptrn->toa_aux = aux->duration - ptrn->tob_aux;
+}
+
+static void __rtw89_mcc_fill_ptrn_anchor_aux(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_pattern *ptrn,
+ bool small_bcn_ofst)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ u16 bcn_ofst = config->beacon_offset;
+ u16 aux_tob;
+ u16 aux_toa;
+
+ if (aux->limit.enable) {
+ aux_tob = aux->limit.max_tob;
+ aux_toa = aux->limit.max_toa;
+ } else {
+ aux_tob = aux->duration / 2;
+ aux_toa = aux->duration / 2;
+ }
+
+ if (small_bcn_ofst) {
+ ptrn->tob_aux = aux_tob;
+ ptrn->toa_aux = aux->duration - ptrn->tob_aux;
+ } else {
+ ptrn->toa_aux = aux_toa;
+ ptrn->tob_aux = aux->duration - ptrn->toa_aux;
+ }
+
+ ptrn->toa_ref = bcn_ofst - ptrn->tob_aux;
+ ptrn->tob_ref = ref->duration - ptrn->toa_ref;
+}
+
+static int __rtw89_mcc_calc_pattern_anchor(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_pattern *ptrn,
+ bool hdl_bt)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ u16 mcc_intvl = config->mcc_interval;
+ u16 bcn_ofst = config->beacon_offset;
+ bool small_bcn_ofst;
+
+ if (bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME)
+ small_bcn_ofst = true;
+ else if (mcc_intvl - bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME)
+ small_bcn_ofst = false;
+ else
+ return -EPERM;
+
+ *ptrn = (typeof(*ptrn)){
+ .plan = hdl_bt ? RTW89_MCC_PLAN_TAIL_BT : RTW89_MCC_PLAN_NO_BT,
+ };
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC calc ptrn_ac: plan %d, bcn_ofst %d\n",
+ ptrn->plan, bcn_ofst);
+
+ if (ref->is_go || ref->is_gc)
+ __rtw89_mcc_fill_ptrn_anchor_ref(rtwdev, ptrn, small_bcn_ofst);
+ else if (aux->is_go || aux->is_gc)
+ __rtw89_mcc_fill_ptrn_anchor_aux(rtwdev, ptrn, small_bcn_ofst);
+ else
+ __rtw89_mcc_fill_ptrn_anchor_ref(rtwdev, ptrn, small_bcn_ofst);
+
+ return 0;
+}
+
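
The anchor plan only applies when the beacon offset pins one role against an interval edge. A sketch of that classification, using a placeholder value for RTW89_MCC_MIN_RX_BCN_TIME (the real constant is defined in chan.h); returning nonzero models the -EPERM fall-through to the loose calculation:

#include <stdbool.h>
#include <stdio.h>

#define MIN_RX_BCN_TIME 40 /* TU, placeholder for the real constant */

static int classify(int mcc_intvl, int bcn_ofst, bool *small_bcn_ofst)
{
	if (bcn_ofst < MIN_RX_BCN_TIME)
		*small_bcn_ofst = true;
	else if (mcc_intvl - bcn_ofst < MIN_RX_BCN_TIME)
		*small_bcn_ofst = false;
	else
		return -1; /* neither edge is tight: anchor plan not used */
	return 0;
}

int main(void)
{
	bool small;

	if (!classify(100, 30, &small))
		printf("bcn_ofst 30: anchor, small=%d\n", small);
	if (!classify(100, 75, &small))
		printf("bcn_ofst 75: anchor, small=%d\n", small);
	if (classify(100, 50, &small))
		printf("bcn_ofst 50: fall back to loose calc\n");
	return 0;
}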
static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1189,6 +1341,10 @@ static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt)
goto done;
}
+ ret = __rtw89_mcc_calc_pattern_anchor(rtwdev, &ptrn, hdl_bt);
+ if (!ret)
+ goto done;
+
__rtw89_mcc_calc_pattern_loose(rtwdev, &ptrn, hdl_bt);
done:
@@ -1439,88 +1595,41 @@ static bool rtw89_mcc_duration_decision_on_bt(struct rtw89_dev *rtwdev)
return false;
}
-static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
- struct rtw89_mcc_role *tgt,
- struct rtw89_mcc_role *src,
- bool ref_is_src)
-{
- struct rtw89_mcc_info *mcc = &rtwdev->mcc;
- struct rtw89_mcc_config *config = &mcc->config;
- u16 beacon_offset_us = ieee80211_tu_to_usec(config->beacon_offset);
- u32 bcn_intvl_src_us = ieee80211_tu_to_usec(src->beacon_interval);
- u32 cur_tbtt_ofst_src;
- u32 tsf_ofst_tgt;
- u32 remainder;
- u64 tbtt_tgt;
- u64 tsf_src;
- int ret;
-
- ret = rtw89_mac_port_get_tsf(rtwdev, src->rtwvif_link, &tsf_src);
- if (ret) {
- rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
- return;
- }
-
- cur_tbtt_ofst_src = rtw89_mcc_get_tbtt_ofst(rtwdev, src, tsf_src);
-
- if (ref_is_src)
- tbtt_tgt = tsf_src - cur_tbtt_ofst_src + beacon_offset_us;
- else
- tbtt_tgt = tsf_src - cur_tbtt_ofst_src +
- (bcn_intvl_src_us - beacon_offset_us);
-
- div_u64_rem(tbtt_tgt, bcn_intvl_src_us, &remainder);
- tsf_ofst_tgt = bcn_intvl_src_us - remainder;
-
- config->sync.macid_tgt = tgt->rtwvif_link->mac_id;
- config->sync.band_tgt = tgt->rtwvif_link->mac_idx;
- config->sync.port_tgt = tgt->rtwvif_link->port;
- config->sync.macid_src = src->rtwvif_link->mac_id;
- config->sync.band_src = src->rtwvif_link->mac_idx;
- config->sync.port_src = src->rtwvif_link->port;
- config->sync.offset = tsf_ofst_tgt / 1024;
- config->sync.enable = true;
-
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC sync tbtt: tgt %d, src %d, offset %d\n",
- config->sync.macid_tgt, config->sync.macid_src,
- config->sync.offset);
-
- rtw89_mac_port_tsf_sync(rtwdev, tgt->rtwvif_link, src->rtwvif_link,
- config->sync.offset);
-}
-
static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
- u32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref);
- struct rtw89_vif_link *rtwvif_link = ref->rtwvif_link;
+ s32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref);
u64 tsf, start_tsf;
u32 cur_tbtt_ofst;
u64 min_time;
+ u64 tsf_aux;
int ret;
- ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
- if (ret) {
- rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_req_tsf(rtwdev, &tsf, &tsf_aux);
+ else
+ ret = __mcc_fw_req_tsf(rtwdev, &tsf, &tsf_aux);
+
+ if (ret)
return ret;
- }
min_time = tsf;
- if (ref->is_go)
+ if (ref->is_go || aux->is_go)
min_time += ieee80211_tu_to_usec(RTW89_MCC_SHORT_TRIGGER_TIME);
else
min_time += ieee80211_tu_to_usec(RTW89_MCC_LONG_TRIGGER_TIME);
cur_tbtt_ofst = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf);
start_tsf = tsf - cur_tbtt_ofst + bcn_intvl_ref_us - tob_ref_us;
- while (start_tsf < min_time)
- start_tsf += bcn_intvl_ref_us;
+ if (start_tsf < min_time)
+ start_tsf += roundup_u64(min_time - start_tsf, bcn_intvl_ref_us);
config->start_tsf = start_tsf;
+ config->start_tsf_in_aux_domain = tsf_aux + start_tsf - tsf;
return 0;
}
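
The replaced while loop and the new roundup-based form land on the same start TSF; the roundup just gets there in one division instead of repeated additions. A standalone check, with roundup_u64 reimplemented locally for the demo:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_u64(uint64_t x, uint64_t y)
{
	return ((x + y - 1) / y) * y; /* round x up to a multiple of y */
}

int main(void)
{
	uint64_t start_tsf = 1000, min_time = 987654, intvl = 102400; /* us */
	uint64_t a = start_tsf, b = start_tsf;

	while (a < min_time) /* old form: one step per beacon interval */
		a += intvl;

	if (b < min_time) /* new form: a single division */
		b += roundup_u64(min_time - b, intvl);

	printf("loop=%" PRIu64 " roundup=%" PRIu64 " equal=%d\n",
	       a, b, a == b);
	return 0;
}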
@@ -1537,13 +1646,11 @@ static int rtw89_mcc_fill_config(struct rtw89_dev *rtwdev)
switch (mcc->mode) {
case RTW89_MCC_MODE_GO_STA:
- config->beacon_offset = RTW89_MCC_DFLT_BCN_OFST_TIME;
+ config->beacon_offset = rtw89_mcc_get_bcn_ofst(rtwdev);
if (ref->is_go) {
- rtw89_mcc_sync_tbtt(rtwdev, ref, aux, false);
config->mcc_interval = ref->beacon_interval;
rtw89_mcc_set_duration_go_sta(rtwdev, ref, aux);
} else {
- rtw89_mcc_sync_tbtt(rtwdev, aux, ref, true);
config->mcc_interval = aux->beacon_interval;
rtw89_mcc_set_duration_go_sta(rtwdev, aux, ref);
}
@@ -1573,10 +1680,8 @@ bottom:
static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role)
{
+ const struct rtw89_mcc_courtesy_cfg *crtz = role->crtz;
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
- struct rtw89_mcc_config *config = &mcc->config;
- struct rtw89_mcc_pattern *pattern = &config->pattern;
- struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
struct rtw89_mcc_policy *policy = &role->policy;
struct rtw89_fw_mcc_add_req req = {};
const struct rtw89_chan *chan;
@@ -1599,9 +1704,9 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
req.duration = role->duration;
req.btc_in_2g = false;
- if (courtesy->enable && courtesy->macid_src == req.macid) {
- req.courtesy_target = courtesy->macid_tgt;
- req.courtesy_num = courtesy->slot_num;
+ if (crtz) {
+ req.courtesy_target = crtz->macid_tgt;
+ req.courtesy_num = crtz->slot_num;
req.courtesy_en = true;
}
@@ -1781,26 +1886,23 @@ static void __mrc_fw_add_courtesy(struct rtw89_dev *rtwdev,
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
- struct rtw89_mcc_config *config = &mcc->config;
- struct rtw89_mcc_pattern *pattern = &config->pattern;
- struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
struct rtw89_fw_mrc_add_slot_arg *slot_arg_src;
- u8 slot_idx_tgt;
- if (!courtesy->enable)
- return;
-
- if (courtesy->macid_src == ref->rtwvif_link->mac_id) {
+ if (ref->crtz) {
slot_arg_src = &arg->slots[ref->slot_idx];
- slot_idx_tgt = aux->slot_idx;
- } else {
- slot_arg_src = &arg->slots[aux->slot_idx];
- slot_idx_tgt = ref->slot_idx;
+
+ slot_arg_src->courtesy_target = aux->slot_idx;
+ slot_arg_src->courtesy_period = ref->crtz->slot_num;
+ slot_arg_src->courtesy_en = true;
}
- slot_arg_src->courtesy_target = slot_idx_tgt;
- slot_arg_src->courtesy_period = courtesy->slot_num;
- slot_arg_src->courtesy_en = true;
+ if (aux->crtz) {
+ slot_arg_src = &arg->slots[aux->slot_idx];
+
+ slot_arg_src->courtesy_target = ref->slot_idx;
+ slot_arg_src->courtesy_period = aux->crtz->slot_num;
+ slot_arg_src->courtesy_en = true;
+ }
}
static int __mrc_fw_start(struct rtw89_dev *rtwdev, bool replace)
@@ -2000,30 +2102,24 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
- struct rtw89_mcc_pattern *pattern = &config->pattern;
- struct rtw89_mcc_sync *sync = &config->sync;
struct ieee80211_p2p_noa_desc noa_desc = {};
- u64 start_time = config->start_tsf;
u32 interval = config->mcc_interval;
struct rtw89_vif_link *rtwvif_go;
+ u64 start_time;
u32 duration;
if (mcc->mode != RTW89_MCC_MODE_GO_STA)
return;
if (ref->is_go) {
+ start_time = config->start_tsf;
rtwvif_go = ref->rtwvif_link;
start_time += ieee80211_tu_to_usec(ref->duration);
duration = config->mcc_interval - ref->duration;
} else if (aux->is_go) {
+ start_time = config->start_tsf_in_aux_domain;
rtwvif_go = aux->rtwvif_link;
- start_time += ieee80211_tu_to_usec(pattern->tob_ref) +
- ieee80211_tu_to_usec(config->beacon_offset) +
- ieee80211_tu_to_usec(pattern->toa_aux);
duration = config->mcc_interval - aux->duration;
-
- /* convert time domain from sta(ref) to GO(aux) */
- start_time += ieee80211_tu_to_usec(sync->offset);
} else {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC find no GO: skip updating beacon NoA\n");
@@ -2127,6 +2223,12 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
}
struct rtw89_mcc_stop_sel {
+ struct {
+ const struct rtw89_vif_link *target;
+ } hint;
+
+ /* selection content */
+ bool filled;
u8 mac_id;
u8 slot_idx;
};
@@ -2136,6 +2238,7 @@ static void rtw89_mcc_stop_sel_fill(struct rtw89_mcc_stop_sel *sel,
{
sel->mac_id = mcc_role->rtwvif_link->mac_id;
sel->slot_idx = mcc_role->slot_idx;
+ sel->filled = true;
}
static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev,
@@ -2145,23 +2248,41 @@ static int rtw89_mcc_stop_sel_iterator(struct rtw89_dev *rtwdev,
{
struct rtw89_mcc_stop_sel *sel = data;
+ if (mcc_role->rtwvif_link == sel->hint.target) {
+ rtw89_mcc_stop_sel_fill(sel, mcc_role);
+ return 1; /* break iteration */
+ }
+
+ if (sel->filled)
+ return 0;
+
if (!mcc_role->rtwvif_link->chanctx_assigned)
return 0;
rtw89_mcc_stop_sel_fill(sel, mcc_role);
- return 1; /* break iteration */
+ return 0;
}
-static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
+static void rtw89_mcc_stop(struct rtw89_dev *rtwdev,
+ const struct rtw89_chanctx_pause_parm *pause)
{
+ struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
- struct rtw89_mcc_stop_sel sel;
+ struct rtw89_mcc_stop_sel sel = {
+ .hint.target = pause ? pause->trigger : NULL,
+ };
int ret;
+ if (!pause) {
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->chanctx_work);
+ bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
+ }
+
/* by default, stop at ref */
- rtw89_mcc_stop_sel_fill(&sel, ref);
rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_stop_sel_iterator, &sel);
+ if (!sel.filled)
+ rtw89_mcc_stop_sel_fill(&sel, ref);
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop at <macid %d>\n", sel.mac_id);
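
A plain-array sketch of the new selection order (simplified types, not the driver's iterator plumbing): a pause-trigger hint wins outright, otherwise the first role with a chanctx assigned is used, with ref as the final fallback:

#include <stdbool.h>
#include <stdio.h>

struct role { int mac_id; bool chanctx_assigned; };

static int select_stop(const struct role *roles, int n,
		       const struct role *hint_target, const struct role *ref)
{
	const struct role *sel = NULL;

	for (int i = 0; i < n; i++) {
		if (&roles[i] == hint_target)
			return roles[i].mac_id; /* hint wins, break iteration */
		if (!sel && roles[i].chanctx_assigned)
			sel = &roles[i];
	}

	return (sel ? sel : ref)->mac_id; /* by default, stop at ref */
}

int main(void)
{
	struct role roles[2] = { { .mac_id = 1, .chanctx_assigned = false },
				 { .mac_id = 2, .chanctx_assigned = true } };

	printf("no hint: stop at %d\n", select_stop(roles, 2, NULL, &roles[0]));
	printf("hinted:  stop at %d\n", select_stop(roles, 2, &roles[0], &roles[0]));
	return 0;
}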
@@ -2193,6 +2314,7 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_config *config = &mcc->config;
struct rtw89_mcc_config old_cfg = *config;
+ bool courtesy_changed;
bool sync_changed;
int ret;
@@ -2205,8 +2327,15 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ if (memcmp(&old_cfg.pattern.courtesy, &config->pattern.courtesy,
+ sizeof(old_cfg.pattern.courtesy)) == 0)
+ courtesy_changed = false;
+ else
+ courtesy_changed = true;
+
if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT ||
- config->pattern.plan != RTW89_MCC_PLAN_NO_BT) {
+ config->pattern.plan != RTW89_MCC_PLAN_NO_BT ||
+ courtesy_changed) {
if (rtw89_concurrent_via_mrc(rtwdev))
ret = __mrc_fw_start(rtwdev, true);
else
@@ -2238,7 +2367,7 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_config *config = &mcc->config;
struct rtw89_mcc_pattern *pattern = &config->pattern;
- s16 tolerance;
+ u16 tolerance;
u16 bcn_ofst;
u16 diff;
@@ -2246,18 +2375,25 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
return;
bcn_ofst = rtw89_mcc_get_bcn_ofst(rtwdev);
+ if (bcn_ofst == config->beacon_offset)
+ return;
+
if (bcn_ofst > config->beacon_offset) {
diff = bcn_ofst - config->beacon_offset;
if (pattern->tob_aux < 0)
tolerance = -pattern->tob_aux;
- else
+ else if (pattern->toa_aux > 0)
tolerance = pattern->toa_aux;
+ else
+ return; /* no chance to improve */
} else {
diff = config->beacon_offset - bcn_ofst;
if (pattern->toa_aux < 0)
tolerance = -pattern->toa_aux;
- else
+ else if (pattern->tob_aux > 0)
tolerance = pattern->tob_aux;
+ else
+ return; /* no chance to improve */
}
if (diff <= tolerance)
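
(The remainder of the function, which replans when diff exceeds tolerance, lies outside this hunk.) To see the decision end to end, here is a standalone model of the drift check: the sign of tob_aux/toa_aux indicates which side of the aux slot still has slack, and when neither side can absorb the drift the function now bails out early instead of replanning pointlessly:

#include <stdbool.h>
#include <stdio.h>

/* returns true when an MCC update should be scheduled */
static bool needs_update(int cfg_ofst, int cur_ofst, int tob_aux, int toa_aux)
{
	int diff, tolerance;

	if (cur_ofst == cfg_ofst)
		return false;

	if (cur_ofst > cfg_ofst) {
		diff = cur_ofst - cfg_ofst;
		if (tob_aux < 0)
			tolerance = -tob_aux;
		else if (toa_aux > 0)
			tolerance = toa_aux;
		else
			return false; /* no chance to improve */
	} else {
		diff = cfg_ofst - cur_ofst;
		if (toa_aux < 0)
			tolerance = -toa_aux;
		else if (tob_aux > 0)
			tolerance = tob_aux;
		else
			return false; /* no chance to improve */
	}

	return diff > tolerance;
}

int main(void)
{
	/* 4 TU of drift against 6 TU of toa_aux slack: stay put */
	printf("%d\n", needs_update(40, 44, 10, 6));
	/* 8 TU of drift exceeds the slack: replan */
	printf("%d\n", needs_update(40, 48, 10, 6));
	return 0;
}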
@@ -2504,7 +2640,7 @@ void rtw89_chanctx_track(struct rtw89_dev *rtwdev)
}
void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
- enum rtw89_chanctx_pause_reasons rsn)
+ const struct rtw89_chanctx_pause_parm *pause_parm)
{
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_entity_mode mode;
@@ -2514,12 +2650,12 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
if (hal->entity_pause)
return;
- rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", rsn);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", pause_parm->rsn);
mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
case RTW89_ENTITY_MODE_MCC:
- rtw89_mcc_stop(rtwdev);
+ rtw89_mcc_stop(rtwdev, pause_parm);
break;
default:
break;
@@ -2744,7 +2880,7 @@ out:
cur = rtw89_get_entity_mode(rtwdev);
switch (cur) {
case RTW89_ENTITY_MODE_MCC:
- rtw89_mcc_stop(rtwdev);
+ rtw89_mcc_stop(rtwdev, NULL);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index e6391f6f2aa7..2a25563593af 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -31,6 +31,14 @@
#define RTW89_MCC_DFLT_TX_NULL_EARLY 3
#define RTW89_MCC_DFLT_COURTESY_SLOT 3
+#define RTW89_MCC_REQ_COURTESY_TIME 5
+#define RTW89_MCC_REQ_COURTESY(pattern, role) \
+({ \
+ const struct rtw89_mcc_pattern *p = pattern; \
+ p->tob_ ## role <= RTW89_MCC_REQ_COURTESY_TIME || \
+ p->toa_ ## role <= RTW89_MCC_REQ_COURTESY_TIME; \
+})
+
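
A note on the macro above: the ## token pasting means the role argument must literally be ref or aux, selecting the matching pattern fields at preprocessing time, and the GNU statement expression evaluates to its last expression. A minimal standalone expansion check (simplified struct, same shape as the real macro; build with GCC or Clang):

#include <stdio.h>

#define COURTESY_TIME 5

struct pat { int tob_ref, toa_ref, tob_aux, toa_aux; };

#define REQ_COURTESY(pattern, role) \
({ \
	const struct pat *p = pattern; \
	p->tob_ ## role <= COURTESY_TIME || \
	p->toa_ ## role <= COURTESY_TIME; \
})

int main(void)
{
	struct pat x = { .tob_ref = 3, .toa_ref = 40,
			 .tob_aux = 30, .toa_aux = 30 };

	/* REQ_COURTESY(&x, ref) expands against x.tob_ref / x.toa_ref */
	printf("ref: %d, aux: %d\n",
	       (int)REQ_COURTESY(&x, ref), (int)REQ_COURTESY(&x, aux));
	return 0;
}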
#define NUM_OF_RTW89_MCC_ROLES 2
enum rtw89_chanctx_pause_reasons {
@@ -38,6 +46,11 @@ enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_ROC,
};
+struct rtw89_chanctx_pause_parm {
+ const struct rtw89_vif_link *trigger;
+ enum rtw89_chanctx_pause_reasons rsn;
+};
+
struct rtw89_chanctx_cb_parm {
int (*cb)(struct rtw89_dev *rtwdev, void *data);
void *data;
@@ -95,7 +108,7 @@ void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef);
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
- enum rtw89_chanctx_idx idx,
+ struct rtw89_vif_link *rtwvif_link,
const struct cfg80211_chan_def *chandef);
void rtw89_entity_init(struct rtw89_dev *rtwdev);
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
@@ -105,7 +118,7 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_changes change);
void rtw89_chanctx_track(struct rtw89_dev *rtwdev);
void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
- enum rtw89_chanctx_pause_reasons rsn);
+ const struct rtw89_chanctx_pause_parm *parm);
void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
const struct rtw89_chanctx_cb_parm *cb_parm);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index cc9b014457ac..49447668cbf3 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -203,6 +203,23 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
},
};
+static const u8 rtw89_ext_capa_sta[] = {
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const struct wiphy_iftype_ext_capab rtw89_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = rtw89_ext_capa_sta,
+ .extended_capabilities_mask = rtw89_ext_capa_sta,
+ .extended_capabilities_len = sizeof(rtw89_ext_capa_sta),
+ /* relevant only if EHT is supported */
+ .eml_capabilities = 0,
+ .mld_capa_and_ops = 0,
+ },
+};
+
#define RTW89_6GHZ_SPAN_HEAD 6145
#define RTW89_6GHZ_SPAN_IDX(center_freq) \
((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2)
@@ -211,6 +228,8 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
[RTW89_6GHZ_SPAN_IDX(center_freq)] = { \
.sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
.sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
+ .acpi_sar_subband_low = RTW89_ACPI_SAR_6GHZ_ ## subband_l, \
+ .acpi_sar_subband_high = RTW89_ACPI_SAR_6GHZ_ ## subband_h, \
.ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \
.ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \
}
@@ -639,9 +658,17 @@ out:
static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;
+ if (desc_info->mlo && !desc_info->sw_mld) {
+ if (rtwsta_link)
+ return rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
+ else
+ return rtw89_vif_get_main_macid(rtwvif_link->rtwvif);
+ }
+
if (!rtwsta_link)
return rtwvif_link->mac_id;
@@ -671,7 +698,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct sk_buff *skb = tx_req->skb;
u8 qsel, ch_dma;
- qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
+ qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
desc_info->qsel = qsel;
@@ -1104,39 +1131,23 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
+static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ struct sk_buff *skb, int *qsel, bool sw_mld)
{
- struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
- struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
- struct rtw89_core_tx_request tx_req = {0};
- struct rtw89_sta_link *rtwsta_link = NULL;
- struct rtw89_vif_link *rtwvif_link;
+ struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_core_tx_request tx_req = {};
int ret;
- /* By default, driver writes tx via the link on HW-0. And then,
- * according to links' status, HW can change tx to another link.
- */
-
- if (rtwsta) {
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
- if (unlikely(!rtwsta_link)) {
- rtw89_err(rtwdev, "tx: find no sta link on HW-0\n");
- return -ENOLINK;
- }
- }
-
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
- if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "tx: find no vif link on HW-0\n");
- return -ENOLINK;
- }
-
tx_req.skb = skb;
tx_req.vif = vif;
tx_req.sta = sta;
tx_req.rtwvif_link = rtwvif_link;
tx_req.rtwsta_link = rtwsta_link;
+ tx_req.desc_info.sw_mld = sw_mld;
rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
@@ -1155,6 +1166,33 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
return 0;
}
+int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
+{
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ struct rtw89_sta_link *rtwsta_link = NULL;
+ struct rtw89_vif_link *rtwvif_link;
+
+ if (rtwsta) {
+ rtwsta_link = rtw89_get_designated_link(rtwsta);
+ if (unlikely(!rtwsta_link)) {
+ rtw89_err(rtwdev, "tx: find no sta designated link\n");
+ return -ENOLINK;
+ }
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+ } else {
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
+ if (unlikely(!rtwvif_link)) {
+ rtw89_err(rtwdev, "tx: find no vif designated link\n");
+ return -ENOLINK;
+ }
+ }
+
+ return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false);
+}
+
static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) |
@@ -1382,7 +1420,9 @@ static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq);
+ u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq) |
+ FIELD_PREP(BE_TXD_BODY3_MLO_FLAG, desc_info->mlo) |
+ FIELD_PREP(BE_TXD_BODY3_IS_MLD_SW_EN, desc_info->sw_mld);
return cpu_to_le32(dword);
}
@@ -1635,10 +1675,7 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
u8 evm_pos = 0;
int i;
- /* FIXME: For single link, taking link on HW-0 here is okay. But, when
- * enabling multiple active links, we should determine the right link.
- */
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
+ rtwsta_link = rtw89_sta_get_link_inst(rtwsta, phy_ppdu->phy_idx);
if (unlikely(!rtwsta_link))
return;
@@ -2058,10 +2095,21 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
break;
if (aid == vif->cfg.aid) {
- enum nl80211_he_ru_alloc rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1);
+ enum nl80211_he_ru_alloc rua;
rtwvif->stats.rx_tf_acc++;
rtwdev->stats.rx_tf_acc++;
+
+ /* The following is only required for HE trigger frames, but we
+ * cannot use the UL HE-SIG-A2 reserved subfield to identify them,
+ * since some 11ax APs fill it with all 0s, which would be
+ * misread as an EHT trigger frame.
+ */
+ if (bss_conf->eht_support)
+ break;
+
+ rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1);
+
if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ &&
rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106)
rtwvif_link->pwr_diff_en = true;
@@ -2152,8 +2200,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
struct sk_buff *skb = iter_data->skb;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtw89_rx_phy_ppdu *phy_ppdu = iter_data->phy_ppdu;
+ bool is_mld = ieee80211_vif_is_mld(vif);
struct ieee80211_bss_conf *bss_conf;
struct rtw89_vif_link *rtwvif_link;
const u8 *bssid = iter_data->bssid;
@@ -2165,10 +2215,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
rcu_read_lock();
- /* FIXME: For single link, taking link on HW-0 here is okay. But, when
- * enabling multiple active links, we should determine the right link.
- */
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_vif_get_link_inst(rtwvif, desc_info->bb_sel);
if (unlikely(!rtwvif_link))
goto out;
@@ -2184,6 +2231,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
if (!ether_addr_equal(bss_conf->bssid, bssid))
goto out;
+ if (is_mld) {
+ rx_status->link_valid = true;
+ rx_status->link_id = rtwvif_link->link_id;
+ }
+
if (ieee80211_is_beacon(hdr->frame_control)) {
if (vif->type == NL80211_IFTYPE_STATION &&
!test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
@@ -2482,7 +2534,8 @@ static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev,
.len = skb->len,
.to_self = desc_info->addr1_match,
.rate = desc_info->data_rate,
- .mac_id = desc_info->mac_id};
+ .mac_id = desc_info->mac_id,
+ .phy_idx = desc_info->bb_sel};
int ret;
if (desc_info->mac_info_valid) {
@@ -2593,6 +2646,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK);
desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD);
desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK);
+ desc_info->bb_sel = le32_get_bits(rxd_s->dword0, BE_RXD_BB_SEL);
if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT)
desc_info->mac_info_valid = true;
@@ -2665,10 +2719,7 @@ void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta)
struct rtw89_sta_link *rtwsta_link;
u8 mac_id = iter_data->mac_id;
- /* FIXME: For single link, taking link on HW-0 here is okay. But, when
- * enabling multiple active links, we should determine the right link.
- */
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
+ rtwsta_link = rtw89_sta_get_link_inst(rtwsta, desc_info->bb_sel);
if (unlikely(!rtwsta_link))
return;
@@ -3117,9 +3168,9 @@ static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev,
if (!rtwsta)
return false;
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
+ rtwsta_link = rtw89_get_designated_link(rtwsta);
if (unlikely(!rtwsta_link)) {
- rtw89_err(rtwdev, "agg wait: find no link on HW-0\n");
+ rtw89_err(rtwdev, "agg wait: find no designated link\n");
return false;
}
@@ -3284,8 +3335,10 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
+ struct rtw89_sta_link *rtwsta_link;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
+ struct rtw89_sta *rtwsta;
struct sk_buff *skb;
int ret, qsel;
@@ -3298,6 +3351,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
ret = -EINVAL;
goto out;
}
+ rtwsta = sta_to_rtwsta(sta);
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos);
if (!skb) {
@@ -3309,7 +3363,13 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
if (ps)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
- ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
+ rtwsta_link = rtwsta->links[rtwvif_link->link_id];
+ if (unlikely(!rtwsta_link)) {
+ ret = -ENOLINK;
+ goto out;
+ }
+
+ ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true);
if (ret) {
rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
dev_kfree_skb_any(skb);
@@ -3329,6 +3389,9 @@ out:
void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_chanctx_pause_parm pause_parm = {
+ .rsn = RTW89_CHANCTX_PAUSE_REASON_ROC,
+ };
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw89_roc *roc = &rtwvif->roc;
struct rtw89_vif_link *rtwvif_link;
@@ -3342,14 +3405,16 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_leave_ips_by_hwflags(rtwdev);
rtw89_leave_lps(rtwdev);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "roc start: find no link on HW-%u\n",
- RTW89_ROC_BY_LINK_INDEX);
+ rtw89_err(rtwdev, "roc start: find no designated link\n");
return;
}
- rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_ROC);
+ roc->link_id = rtwvif_link->link_id;
+
+ pause_parm.trigger = rtwvif_link;
+ rtw89_chanctx_pause(rtwdev, &pause_parm);
ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, true);
if (ret)
@@ -3369,7 +3434,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
}
cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT);
- rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, &roc_chan);
+ rtw89_config_roc_chandef(rtwdev, rtwvif_link, &roc_chan);
rtw89_set_channel(rtwdev);
reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
@@ -3398,10 +3463,10 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_leave_ips_by_hwflags(rtwdev);
rtw89_leave_lps(rtwdev);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX);
+ rtwvif_link = rtwvif->links[roc->link_id];
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "roc end: find no link on HW-%u\n",
- RTW89_ROC_BY_LINK_INDEX);
+ rtw89_err(rtwdev, "roc end: find no link (link id %u)\n",
+ roc->link_id);
return;
}
@@ -3409,7 +3474,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
roc->state = RTW89_ROC_IDLE;
- rtw89_config_roc_chandef(rtwdev, rtwvif_link->chanctx_idx, NULL);
+ rtw89_config_roc_chandef(rtwdev, rtwvif_link, NULL);
rtw89_chanctx_proceed(rtwdev, NULL);
ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false);
if (ret)
@@ -3577,6 +3642,98 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
ewma_tp_init(&stats->rx_ewma_tp);
}
+#define RTW89_MLSR_GOTO_2GHZ_THRESHOLD -53
+#define RTW89_MLSR_EXIT_2GHZ_THRESHOLD -38
+static void rtw89_core_mlsr_link_decision(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ unsigned int sel_link_id = IEEE80211_MLD_MAX_NUM_LINKS;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_vif_link *rtwvif_link;
+ const struct rtw89_chan *chan;
+ unsigned long usable_links;
+ unsigned int link_id;
+ u8 decided_bands;
+ u8 rssi;
+
+ rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
+ if (unlikely(!rssi))
+ return;
+
+ if (RTW89_RSSI_RAW_TO_DBM(rssi) >= RTW89_MLSR_EXIT_2GHZ_THRESHOLD)
+ decided_bands = BIT(RTW89_BAND_5G) | BIT(RTW89_BAND_6G);
+ else if (RTW89_RSSI_RAW_TO_DBM(rssi) <= RTW89_MLSR_GOTO_2GHZ_THRESHOLD)
+ decided_bands = BIT(RTW89_BAND_2G);
+ else
+ return;
+
+ usable_links = ieee80211_vif_usable_links(vif);
+
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
+ if (unlikely(!rtwvif_link))
+ goto select;
+
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ if (decided_bands & BIT(chan->band_type))
+ return;
+
+ usable_links &= ~BIT(rtwvif_link->link_id);
+
+select:
+ rcu_read_lock();
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_channel *channel;
+ enum rtw89_band band;
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (unlikely(!link_conf))
+ continue;
+
+ channel = link_conf->chanreq.oper.chan;
+ if (unlikely(!channel))
+ continue;
+
+ band = rtw89_nl80211_to_hw_band(channel->band);
+ if (decided_bands & BIT(band)) {
+ sel_link_id = link_id;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (sel_link_id == IEEE80211_MLD_MAX_NUM_LINKS)
+ return;
+
+ rtw89_core_mlsr_switch(rtwdev, rtwvif, sel_link_id);
+}
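
The two thresholds above form a hysteresis band: below -53 dBm the MLSR decision steers toward 2 GHz, above -38 dBm away from it, and in between no switch is attempted, which avoids ping-ponging between links. A standalone model taking dBm directly (the driver first converts the raw EWMA RSSI via RTW89_RSSI_RAW_TO_DBM):

#include <stdio.h>

#define GOTO_2GHZ_THRESHOLD (-53)
#define EXIT_2GHZ_THRESHOLD (-38)

enum band_pref { PREF_KEEP, PREF_2GHZ, PREF_5_6GHZ };

static enum band_pref decide(int rssi_dbm)
{
	if (rssi_dbm >= EXIT_2GHZ_THRESHOLD)
		return PREF_5_6GHZ;
	if (rssi_dbm <= GOTO_2GHZ_THRESHOLD)
		return PREF_2GHZ;
	return PREF_KEEP; /* dead band: no switch attempted */
}

int main(void)
{
	int samples[] = { -60, -45, -30 };

	for (int i = 0; i < 3; i++)
		printf("%d dBm -> pref %d\n", samples[i], decide(samples[i]));
	return 0;
}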
+
+static void rtw89_core_mlo_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+
+ if (hal->disabled_dm_bitmap & BIT(RTW89_DM_MLO))
+ return;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ vif = rtwvif_to_vif(rtwvif);
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ continue;
+
+ switch (rtwvif->mlo_mode) {
+ case RTW89_MLO_MODE_MLSR:
+ rtw89_core_mlsr_link_decision(rtwdev, rtwvif);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
@@ -3615,9 +3772,10 @@ static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
rtw89_phy_antdiv_track(rtwdev);
rtw89_phy_ul_tb_ctrl_track(rtwdev);
rtw89_phy_edcca_track(rtwdev);
- rtw89_tas_track(rtwdev);
+ rtw89_sar_track(rtwdev);
rtw89_chanctx_track(rtwdev);
rtw89_core_rfkill_poll(rtwdev, false);
+ rtw89_core_mlo_track(rtwdev);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
@@ -3846,6 +4004,9 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
+ if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ rtw89_p2p_noa_once_deinit(rtwvif_link);
+
return 0;
}
@@ -4361,17 +4522,18 @@ static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
#define RTW89_SBAND_IFTYPES_NR 2
-static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
- enum nl80211_band band,
- struct ieee80211_supported_band *sband)
+static int rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ struct ieee80211_supported_band *sband)
{
struct ieee80211_sband_iftype_data *iftype_data;
enum nl80211_iftype iftype;
int idx = 0;
- iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
+ iftype_data = devm_kcalloc(rtwdev->dev, RTW89_SBAND_IFTYPES_NR,
+ sizeof(*iftype_data), GFP_KERNEL);
if (!iftype_data)
- return;
+ return -ENOMEM;
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
switch (iftype) {
@@ -4396,77 +4558,75 @@ static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
}
_ieee80211_set_sband_iftype_data(sband, iftype_data, idx);
+ return 0;
+}
+
+static struct ieee80211_supported_band *
+rtw89_core_sband_dup(struct rtw89_dev *rtwdev,
+ const struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_supported_band *dup;
+
+ dup = devm_kmemdup(rtwdev->dev, sband, sizeof(*sband), GFP_KERNEL);
+ if (!dup)
+ return NULL;
+
+ dup->channels = devm_kmemdup(rtwdev->dev, sband->channels,
+ sizeof(*sband->channels) * sband->n_channels,
+ GFP_KERNEL);
+ if (!dup->channels)
+ return NULL;
+
+ dup->bitrates = devm_kmemdup(rtwdev->dev, sband->bitrates,
+ sizeof(*sband->bitrates) * sband->n_bitrates,
+ GFP_KERNEL);
+ if (!dup->bitrates)
+ return NULL;
+
+ return dup;
}
static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
- struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL;
- struct ieee80211_supported_band *sband_6ghz = NULL;
- u32 size = sizeof(struct ieee80211_supported_band);
+ struct ieee80211_supported_band *sband;
u8 support_bands = rtwdev->chip->support_bands;
+ int ret;
if (support_bands & BIT(NL80211_BAND_2GHZ)) {
- sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
- if (!sband_2ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
- hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_2ghz);
+ if (!sband)
+ return -ENOMEM;
+ rtw89_init_ht_cap(rtwdev, &sband->ht_cap);
+ ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband);
+ if (ret)
+ return ret;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
}
if (support_bands & BIT(NL80211_BAND_5GHZ)) {
- sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
- if (!sband_5ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
- rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
- hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_5ghz);
+ if (!sband)
+ return -ENOMEM;
+ rtw89_init_ht_cap(rtwdev, &sband->ht_cap);
+ rtw89_init_vht_cap(rtwdev, &sband->vht_cap);
+ ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband);
+ if (ret)
+ return ret;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
}
if (support_bands & BIT(NL80211_BAND_6GHZ)) {
- sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
- if (!sband_6ghz)
- goto err;
- rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
- hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
+ sband = rtw89_core_sband_dup(rtwdev, &rtw89_sband_6ghz);
+ if (!sband)
+ return -ENOMEM;
+ ret = rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband);
+ if (ret)
+ return ret;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = sband;
}
return 0;
-
-err:
- hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
- hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
- hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
- if (sband_2ghz)
- kfree((__force void *)sband_2ghz->iftype_data);
- if (sband_5ghz)
- kfree((__force void *)sband_5ghz->iftype_data);
- if (sband_6ghz)
- kfree((__force void *)sband_6ghz->iftype_data);
- kfree(sband_2ghz);
- kfree(sband_5ghz);
- kfree(sband_6ghz);
- return -ENOMEM;
-}
-
-static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev)
-{
- struct ieee80211_hw *hw = rtwdev->hw;
-
- if (hw->wiphy->bands[NL80211_BAND_2GHZ])
- kfree((__force void *)hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
- if (hw->wiphy->bands[NL80211_BAND_5GHZ])
- kfree((__force void *)hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
- if (hw->wiphy->bands[NL80211_BAND_6GHZ])
- kfree((__force void *)hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
- kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
- kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
- kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]);
- hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
- hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
- hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
}
static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
@@ -4774,6 +4934,7 @@ struct rtw89_vif_link *rtw89_vif_set_link(struct rtw89_vif *rtwvif,
set_bit(index, rtwvif->links_inst_map);
rtwvif->links[link_id] = rtwvif_link;
+ list_add_tail(&rtwvif_link->dlink_schd, &rtwvif->dlink_pool);
return rtwvif_link;
err:
@@ -4794,6 +4955,7 @@ void rtw89_vif_unset_link(struct rtw89_vif *rtwvif, unsigned int link_id)
index = rtw89_vif_link_inst_get_index(link);
clear_bit(index, rtwvif->links_inst_map);
*container = NULL;
+ list_del(&link->dlink_schd);
}
struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
@@ -4824,6 +4986,7 @@ struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
set_bit(index, rtwsta->links_inst_map);
rtwsta->links[link_id] = rtwsta_link;
+ list_add_tail(&rtwsta_link->dlink_schd, &rtwsta->dlink_pool);
return rtwsta_link;
err:
@@ -4844,6 +5007,7 @@ void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id)
index = rtw89_sta_link_inst_get_index(link);
clear_bit(index, rtwsta->links_inst_map);
*container = NULL;
+ list_del(&link->dlink_schd);
}
int rtw89_core_init(struct rtw89_dev *rtwdev)
@@ -4860,6 +5024,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
continue;
INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
}
+ INIT_LIST_HEAD(&rtwdev->scan_info.chan_list);
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -4880,6 +5045,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtwdev->total_sta_assoc = 0;
rtw89_init_wait(&rtwdev->mcc.wait);
+ rtw89_init_wait(&rtwdev->mlo.wait);
rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
rtw89_init_wait(&rtwdev->wow.wait);
rtw89_init_wait(&rtwdev->mac.ps_wait);
@@ -4901,7 +5067,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
rtwdev->dbcc_en = true;
rtwdev->mac.qta_mode = RTW89_QTA_DBCC;
- rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
+ rtwdev->mlo_dbcc_mode = MLO_1_PLUS_1_1RF;
}
rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0;
@@ -4919,7 +5085,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_ser_init(rtwdev);
rtw89_entity_init(rtwdev);
- rtw89_tas_init(rtwdev);
+ rtw89_sar_init(rtwdev);
rtw89_phy_ant_gain_init(rtwdev);
return 0;
@@ -4945,9 +5111,6 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
rtwdev->scanning = true;
- rtw89_leave_lps(rtwdev);
- if (hw_scan)
- rtw89_leave_ips_by_hwflags(rtwdev);
ether_addr_copy(rtwvif_link->mac_addr, mac_addr);
rtw89_btc_ntfy_scan_start(rtwdev, rtwvif_link->phy_idx, chan->band_type);
@@ -5062,6 +5225,76 @@ out:
rtw89_load_txpwr_table(rtwdev, rtwdev->rfe_parms->byr_tbl);
}
+int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ unsigned int link_id)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ u16 usable_links = ieee80211_vif_usable_links(vif);
+ u16 active_links = vif->active_links;
+ struct rtw89_vif_link *target, *cur;
+ int ret;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ if (unlikely(!ieee80211_vif_is_mld(vif)))
+ return -EOPNOTSUPP;
+
+ if (unlikely(!(usable_links & BIT(link_id)))) {
+ rtw89_warn(rtwdev, "%s: link id %u is not usable\n", __func__,
+ link_id);
+ return -ENOLINK;
+ }
+
+ if (active_links == BIT(link_id))
+ return 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "%s: switch to link id %u MLSR\n",
+ __func__, link_id);
+
+ rtw89_leave_lps(rtwdev);
+
+ ieee80211_stop_queues(rtwdev->hw);
+ flush_work(&rtwdev->txq_work);
+
+ cur = rtw89_get_designated_link(rtwvif);
+
+ ret = ieee80211_set_active_links(vif, active_links | BIT(link_id));
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to activate link id %u\n",
+ __func__, link_id);
+ goto wake_queue;
+ }
+
+ target = rtwvif->links[link_id];
+ if (unlikely(!target)) {
+ rtw89_err(rtwdev, "%s: failed to confirm link id %u\n",
+ __func__, link_id);
+
+ ieee80211_set_active_links(vif, active_links);
+ ret = -EFAULT;
+ goto wake_queue;
+ }
+
+ if (likely(cur))
+ rtw89_fw_h2c_mlo_link_cfg(rtwdev, cur, false);
+
+ rtw89_fw_h2c_mlo_link_cfg(rtwdev, target, true);
+
+ ret = ieee80211_set_active_links(vif, BIT(link_id));
+ if (ret)
+ rtw89_err(rtwdev, "%s: failed to inactivate links 0x%x\n",
+ __func__, active_links);
+
+ rtw89_chip_rfk_channel(rtwdev, target);
+
+ rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR;
+
+wake_queue:
+ ieee80211_wake_queues(rtwdev->hw);
+
+ return ret;
+}
+
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
@@ -5305,8 +5538,11 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (chip->chip_gen == RTW89_CHIP_BE)
hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
- if (rtwdev->support_mlo)
+ if (rtwdev->support_mlo) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ hw->wiphy->iftype_ext_capab = rtw89_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(rtw89_iftypes_ext_capa);
+ }
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
@@ -5337,7 +5573,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ret = rtw89_regd_setup(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to set up regd\n");
- goto err_free_supported_band;
+ return ret;
}
hw->wiphy->sar_capa = &rtw89_sar_capa;
@@ -5345,7 +5581,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ret = ieee80211_register_hw(hw);
if (ret) {
rtw89_err(rtwdev, "failed to register hw\n");
- goto err_free_supported_band;
+ return ret;
}
ret = rtw89_regd_init_hint(rtwdev);
@@ -5360,8 +5596,6 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
err_unregister_hw:
ieee80211_unregister_hw(hw);
-err_free_supported_band:
- rtw89_core_clr_supported_band(rtwdev);
return ret;
}
@@ -5372,7 +5606,6 @@ static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev)
rtw89_rfkill_polling_deinit(rtwdev);
ieee80211_unregister_hw(hw);
- rtw89_core_clr_supported_band(rtwdev);
}
int rtw89_core_register(struct rtw89_dev *rtwdev)
@@ -5440,13 +5673,13 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
if (!hw)
goto err;
- /* TODO: When driver MLO arch. is done, determine whether to support MLO
- * according to the following conditions.
- * 1. run with chanctx_ops
- * 2. chip->support_link_num != 0
- * 3. FW feature supports AP_LINK_PS
+ /* Currently, our AP_LINK_PS handling only works for non-MLD softap
+ * or MLD-single-link softap. If RTW89_MLD_NON_STA_LINK_NUM grows,
+ * please revisit the entire AP_LINK_PS handling before supporting MLO.
*/
- support_mlo = false;
+ support_mlo = !no_chanctx && chip->support_link_num &&
+ RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &early_fw) &&
+ RTW89_MLD_NON_STA_LINK_NUM == 1;
hw->wiphy->iface_combinations = rtw89_iface_combs;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 4be05d6cad18..1c8f3b9b7c4c 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -798,6 +798,7 @@ struct rtw89_rx_phy_ppdu {
u8 rssi[RF_PATH_MAX];
u8 mac_id;
u8 chan_idx;
+ u8 phy_idx;
u8 ie;
u16 rate;
u8 rpl_avg;
@@ -1174,6 +1175,7 @@ struct rtw89_tx_desc_info {
bool ldpc;
bool upd_wlan_hdr;
bool mlo;
+ bool sw_mld;
};
struct rtw89_core_tx_request {
@@ -3379,6 +3381,7 @@ struct rtw89_sec_cam_entry {
struct rtw89_sta_link {
struct rtw89_sta *rtwsta;
+ struct list_head dlink_schd;
unsigned int link_id;
u8 mac_id;
@@ -3445,14 +3448,13 @@ enum rtw89_roc_state {
RTW89_ROC_MGMT,
};
-#define RTW89_ROC_BY_LINK_INDEX 0
-
struct rtw89_roc {
struct ieee80211_channel chan;
struct wiphy_delayed_work roc_work;
enum ieee80211_roc_type type;
enum rtw89_roc_state state;
int duration;
+ unsigned int link_id;
};
#define RTW89_P2P_MAX_NOA_NUM 2
@@ -3483,8 +3485,17 @@ struct rtw89_p2p_noa_setter {
u8 noa_index;
};
+struct rtw89_ps_noa_once_handler {
+ bool in_duration;
+ u64 tsf_begin;
+ u64 tsf_end;
+ struct wiphy_delayed_work set_work;
+ struct wiphy_delayed_work clr_work;
+};
+
struct rtw89_vif_link {
struct rtw89_vif *rtwvif;
+ struct list_head dlink_schd;
unsigned int link_id;
bool chanctx_assigned; /* only valid when running with chanctx_ops */
@@ -3507,6 +3518,7 @@ struct rtw89_vif_link {
u8 hit_rule;
u8 last_noa_nr;
u64 sync_bcn_tsf;
+ bool rand_tsf_done;
bool trigger;
bool lsig_txop;
u8 tgt_ind;
@@ -3527,6 +3539,7 @@ struct rtw89_vif_link {
struct rtw89_phy_rate_pattern rate_pattern;
struct list_head general_pkt_list;
struct rtw89_p2p_noa_setter p2p_noa;
+ struct rtw89_ps_noa_once_handler noa_once;
};
enum rtw89_lv1_rcvy_step {
@@ -3986,7 +3999,11 @@ struct rtw89_rfe_parms {
struct rtw89_txpwr_rule_2ghz rule_2ghz;
struct rtw89_txpwr_rule_5ghz rule_5ghz;
struct rtw89_txpwr_rule_6ghz rule_6ghz;
+ struct rtw89_txpwr_rule_2ghz rule_da_2ghz;
+ struct rtw89_txpwr_rule_5ghz rule_da_5ghz;
+ struct rtw89_txpwr_rule_6ghz rule_da_6ghz;
struct rtw89_tx_shape tx_shape;
+ bool has_da;
};
struct rtw89_rfe_parms_conf {
@@ -4081,9 +4098,15 @@ struct rtw89_rfe_data {
struct rtw89_txpwr_lmt_2ghz_data lmt_2ghz;
struct rtw89_txpwr_lmt_5ghz_data lmt_5ghz;
struct rtw89_txpwr_lmt_6ghz_data lmt_6ghz;
+ struct rtw89_txpwr_lmt_2ghz_data da_lmt_2ghz;
+ struct rtw89_txpwr_lmt_5ghz_data da_lmt_5ghz;
+ struct rtw89_txpwr_lmt_6ghz_data da_lmt_6ghz;
struct rtw89_txpwr_lmt_ru_2ghz_data lmt_ru_2ghz;
struct rtw89_txpwr_lmt_ru_5ghz_data lmt_ru_5ghz;
struct rtw89_txpwr_lmt_ru_6ghz_data lmt_ru_6ghz;
+ struct rtw89_txpwr_lmt_ru_2ghz_data da_lmt_ru_2ghz;
+ struct rtw89_txpwr_lmt_ru_5ghz_data da_lmt_ru_5ghz;
+ struct rtw89_txpwr_lmt_ru_6ghz_data da_lmt_ru_6ghz;
struct rtw89_tx_shape_lmt_data tx_shape_lmt;
struct rtw89_tx_shape_lmt_ru_data tx_shape_lmt_ru;
struct rtw89_rfe_parms rfe_parms;
@@ -4284,12 +4307,14 @@ struct rtw89_chip_info {
bool support_rnr;
bool support_ant_gain;
bool support_tas;
+ bool support_sar_by_ant;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool rx_freq_frome_ie;
bool hw_sec_hdr;
bool hw_mgmt_tx_encrypt;
bool hw_tkip_crypto;
+ bool hw_mlo_bmc_crypto;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -4494,6 +4519,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_LPS_CH_INFO,
RTW89_FW_FEATURE_NO_PHYCAP_P1,
RTW89_FW_FEATURE_NO_POWER_DIFFERENCE,
+ RTW89_FW_FEATURE_BEACON_LOSS_COUNT_V1,
};
struct rtw89_fw_suit {
@@ -4606,6 +4632,7 @@ struct rtw89_cam_info {
enum rtw89_sar_sources {
RTW89_SAR_SOURCE_NONE,
RTW89_SAR_SOURCE_COMMON,
+ RTW89_SAR_SOURCE_ACPI,
RTW89_SAR_SOURCE_NR,
};
@@ -4630,8 +4657,62 @@ struct rtw89_sar_cfg_common {
s32 cfg[RTW89_SAR_SUBBAND_NR];
};
+enum rtw89_acpi_sar_subband {
+ RTW89_ACPI_SAR_2GHZ_SUBBAND,
+ RTW89_ACPI_SAR_5GHZ_SUBBAND_1, /* U-NII-1 */
+ RTW89_ACPI_SAR_5GHZ_SUBBAND_2, /* U-NII-2 */
+ RTW89_ACPI_SAR_5GHZ_SUBBAND_2E, /* U-NII-2-Extended */
+ RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_6, /* U-NII-6 */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */
+ RTW89_ACPI_SAR_6GHZ_SUBBAND_8, /* U-NII-8 */
+
+ NUM_OF_RTW89_ACPI_SAR_SUBBAND,
+ RTW89_ACPI_SAR_SUBBAND_NR_LEGACY = RTW89_ACPI_SAR_5GHZ_SUBBAND_3_4 + 1,
+ RTW89_ACPI_SAR_SUBBAND_NR_HAS_6GHZ = RTW89_ACPI_SAR_6GHZ_SUBBAND_8 + 1,
+};
+
+#define TXPWR_FACTOR_OF_RTW89_ACPI_SAR 3 /* unit: 0.125 dBm */
+#define MAX_VAL_OF_RTW89_ACPI_SAR S16_MAX
+#define MIN_VAL_OF_RTW89_ACPI_SAR S16_MIN
+#define MAX_NUM_OF_RTW89_ACPI_SAR_TBL 6
+#define NUM_OF_RTW89_ACPI_SAR_RF_PATH (RF_PATH_B + 1)
+
+struct rtw89_sar_entry_from_acpi {
+ s16 v[NUM_OF_RTW89_ACPI_SAR_SUBBAND][NUM_OF_RTW89_ACPI_SAR_RF_PATH];
+};
+
+struct rtw89_sar_table_from_acpi {
+ /* If this table is active, all fields must be filled, either from
+ * the BIOS configuration or with default values, for SAR to work well.
+ */
+ struct rtw89_sar_entry_from_acpi entries[RTW89_REGD_NUM];
+};
+
+struct rtw89_sar_indicator_from_acpi {
+ bool enable_sync;
+ unsigned int fields;
+ u8 (*rfpath_to_antidx)(enum rtw89_rf_path rfpath);
+
+ /* Select among @tables of container, rtw89_sar_cfg_acpi, by path.
+ * Not designed with pointers, since the addresses would become
+ * invalid after syncing content with a local container instance.
+ */
+ u8 tblsel[NUM_OF_RTW89_ACPI_SAR_RF_PATH];
+};
+
+struct rtw89_sar_cfg_acpi {
+ u8 downgrade_2tx;
+ unsigned int valid_num;
+ struct rtw89_sar_table_from_acpi tables[MAX_NUM_OF_RTW89_ACPI_SAR_TBL];
+ struct rtw89_sar_indicator_from_acpi indicator;
+};
+
struct rtw89_sar_info {
- /* used to decide how to acces SAR cfg union */
+ /* used to decide how to access SAR cfg union */
enum rtw89_sar_sources src;
/* reserved for different kinds of SAR cfg struct.
@@ -4639,6 +4720,7 @@ struct rtw89_sar_info {
*/
union {
struct rtw89_sar_cfg_common cfg_common;
+ struct rtw89_sar_cfg_acpi cfg_acpi;
};
};
@@ -4674,11 +4756,14 @@ struct rtw89_ant_gain_info {
struct rtw89_6ghz_span {
enum rtw89_sar_subband sar_subband_low;
enum rtw89_sar_subband sar_subband_high;
+ enum rtw89_acpi_sar_subband acpi_sar_subband_low;
+ enum rtw89_acpi_sar_subband acpi_sar_subband_high;
enum rtw89_ant_gain_subband ant_gain_subband_low;
enum rtw89_ant_gain_subband ant_gain_subband_high;
};
#define RTW89_SAR_SPAN_VALID(span) ((span)->sar_subband_high)
+#define RTW89_ACPI_SAR_SPAN_VALID(span) ((span)->acpi_sar_subband_high)
#define RTW89_ANT_GAIN_SPAN_VALID(span) ((span)->ant_gain_subband_high)
enum rtw89_tas_state {
@@ -4692,6 +4777,7 @@ enum rtw89_tas_state {
struct rtw89_tas_info {
u16 tx_ratio_history[RTW89_TAS_TX_RATIO_WINDOW];
u64 txpwr_history[RTW89_TAS_TXPWR_WINDOW];
+ u8 enabled_countries;
u8 txpwr_head_idx;
u8 txpwr_tail_idx;
u8 tx_ratio_idx;
@@ -4765,6 +4851,7 @@ enum rtw89_dm_type {
RTW89_DM_DYNAMIC_EDCCA,
RTW89_DM_THERMAL_PROTECT,
RTW89_DM_TAS,
+ RTW89_DM_MLO,
};
#define RTW89_THERMAL_PROT_LV_MAX 5
@@ -4786,6 +4873,7 @@ struct rtw89_hal {
bool no_mcs_12_13;
atomic_t roc_chanctx_idx;
+ u8 roc_link_index;
DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES);
DECLARE_BITMAP(entity_map, NUM_OF_RTW89_CHANCTX);
@@ -5207,6 +5295,8 @@ struct rtw89_regulatory_info {
const struct rtw89_regd *regd;
enum rtw89_reg_6ghz_power reg_6ghz_power;
struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
+ bool txpwr_uk_follow_etsi;
+
DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
@@ -5360,9 +5450,10 @@ struct rtw89_early_h2c {
struct rtw89_hw_scan_info {
struct rtw89_vif_link *scanning_vif;
struct list_head pkt_list[NUM_NL80211_BANDS];
+ struct list_head chan_list;
struct rtw89_chan op_chan;
+ bool connected;
bool abort;
- u32 last_chan_idx;
};
enum rtw89_phy_bb_gain_band {
@@ -5574,6 +5665,8 @@ struct rtw89_mcc_role {
struct rtw89_mcc_policy policy;
struct rtw89_mcc_limit limit;
+ const struct rtw89_mcc_courtesy_cfg *crtz;
+
/* only valid when running with FW MRC mechanism */
u8 slot_idx;
@@ -5591,13 +5684,16 @@ struct rtw89_mcc_bt_role {
u16 duration; /* TU */
};
-struct rtw89_mcc_courtesy {
- bool enable;
+struct rtw89_mcc_courtesy_cfg {
u8 slot_num;
- u8 macid_src;
u8 macid_tgt;
};
+struct rtw89_mcc_courtesy {
+ struct rtw89_mcc_courtesy_cfg ref;
+ struct rtw89_mcc_courtesy_cfg aux;
+};
+
enum rtw89_mcc_plan {
RTW89_MCC_PLAN_TAIL_BT,
RTW89_MCC_PLAN_MID_BT,
@@ -5631,6 +5727,7 @@ struct rtw89_mcc_config {
struct rtw89_mcc_pattern pattern;
struct rtw89_mcc_sync sync;
u64 start_tsf;
+ u64 start_tsf_in_aux_domain;
u16 mcc_interval; /* TU */
u16 beacon_offset; /* TU */
};
@@ -5651,6 +5748,16 @@ struct rtw89_mcc_info {
struct rtw89_mcc_config config;
};
+enum rtw89_mlo_mode {
+ RTW89_MLO_MODE_MLSR = 0,
+
+ NUM_OF_RTW89_MLO_MODE,
+};
+
+struct rtw89_mlo_info {
+ struct rtw89_wait_info wait;
+};
+
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
@@ -5666,6 +5773,7 @@ struct rtw89_dev {
const struct rtw89_rfe_parms *rfe_parms;
struct rtw89_hal hal;
struct rtw89_mcc_info mcc;
+ struct rtw89_mlo_info mlo;
struct rtw89_mac_info mac;
struct rtw89_fw_info fw;
struct rtw89_hci_info hci;
@@ -5802,6 +5910,9 @@ struct rtw89_vif {
struct rtw89_roc roc;
bool offchan;
+ enum rtw89_mlo_mode mlo_mode;
+
+ struct list_head dlink_pool;
u8 links_inst_valid_num;
DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
struct rtw89_vif_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -5841,6 +5952,7 @@ struct rtw89_sta {
DECLARE_BITMAP(pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+ struct list_head dlink_pool;
u8 links_inst_valid_num;
DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
struct rtw89_sta_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -5936,6 +6048,12 @@ rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
return rcu_dereference(rtwdev->assoc_link_on_macid[macid]);
}
+#define rtw89_get_designated_link(links_holder) \
+({ \
+ typeof(links_holder) p = links_holder; \
+ list_first_entry_or_null(&p->dlink_pool, typeof(*p->links_inst), dlink_schd); \
+})
+
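
The designated link is simply whatever entry currently heads the dlink_pool list that links join via list_add_tail() in rtw89_vif_set_link()/rtw89_sta_set_link() (see the core.c hunks earlier in this diff), or NULL when no link is instantiated. A minimal intrusive-list sketch of that lookup — not the kernel's list.h — where, as in this sketch, the first link set becomes the designated one:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vif_link { unsigned int link_id; struct list_head dlink_schd; };

static struct vif_link *first_or_null(struct list_head *pool)
{
	return pool->next == pool ? NULL :
	       container_of(pool->next, struct vif_link, dlink_schd);
}

int main(void)
{
	struct list_head pool;
	struct vif_link a = { .link_id = 1 }, b = { .link_id = 0 };

	list_init(&pool);
	printf("empty pool -> %p\n", (void *)first_or_null(&pool));

	list_add_tail(&a.dlink_schd, &pool);
	list_add_tail(&b.dlink_schd, &pool);
	printf("designated link id: %u\n", first_or_null(&pool)->link_id);
	return 0;
}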
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
@@ -6825,9 +6943,14 @@ static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev,
static inline u8 rtw89_regd_get(struct rtw89_dev *rtwdev, u8 band)
{
- const struct rtw89_regd *regd = rtwdev->regulatory.regd;
+ const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
+ u8 txpwr_regd = regd->txpwr_regd[band];
+
+ if (regulatory->txpwr_uk_follow_etsi && txpwr_regd == RTW89_UK)
+ return RTW89_ETSI;
- return regd->txpwr_regd[band];
+ return txpwr_regd;
}
static inline void rtw89_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
@@ -7185,6 +7308,7 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
int rtw89_regd_setup(struct rtw89_dev *rtwdev);
int rtw89_regd_init_hint(struct rtw89_dev *rtwdev);
+const char *rtw89_regd_get_string(enum rtw89_regulation_type regd);
void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond);
@@ -7206,5 +7330,7 @@ void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct ieee80211_bss_conf *bss_conf);
void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event);
+int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ unsigned int link_id);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index f2c5753fd386..d6016fa107fb 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -85,6 +85,7 @@ struct rtw89_debugfs {
struct rtw89_debugfs_priv phy_info;
struct rtw89_debugfs_priv stations;
struct rtw89_debugfs_priv disable_dm;
+ struct rtw89_debugfs_priv mlo_mode;
};
struct rtw89_debugfs_iter_data {
@@ -854,45 +855,21 @@ static ssize_t __print_txpwr_map(struct rtw89_dev *rtwdev, char *buf, size_t buf
return p - buf;
}
-#define case_REGD(_regd) \
- case RTW89_ ## _regd: \
- p += scnprintf(p, end - p, #_regd "\n"); \
- break
-
static int __print_regd(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
const struct rtw89_chan *chan)
{
+ const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
char *p = buf, *end = buf + bufsz;
u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
- switch (regd) {
- default:
- p += scnprintf(p, end - p, "UNKNOWN: %d\n", regd);
- break;
- case_REGD(WW);
- case_REGD(ETSI);
- case_REGD(FCC);
- case_REGD(MKK);
- case_REGD(NA);
- case_REGD(IC);
- case_REGD(KCC);
- case_REGD(NCC);
- case_REGD(CHILE);
- case_REGD(ACMA);
- case_REGD(MEXICO);
- case_REGD(UKRAINE);
- case_REGD(CN);
- case_REGD(QATAR);
- case_REGD(UK);
- case_REGD(THAILAND);
- }
+ p += scnprintf(p, end - p, "%s\n", rtw89_regd_get_string(regd));
+ p += scnprintf(p, end - p, "\t(txpwr UK follow ETSI: %s)\n",
+ str_yes_no(regulatory->txpwr_uk_follow_etsi));
return p - buf;
}
-#undef case_REGD
-
struct dbgfs_txpwr_table {
const struct txpwr_map *byr;
const struct txpwr_map *lmt;
@@ -949,6 +926,7 @@ ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev,
char *buf, size_t bufsz)
{
enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+ struct rtw89_sar_parm sar_parm = {};
const struct dbgfs_txpwr_table *tbl;
const struct rtw89_chan *chan;
char *p = buf, *end = buf + bufsz;
@@ -958,11 +936,12 @@ ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev,
rtw89_leave_ps_mode(rtwdev);
chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ sar_parm.center_freq = chan->freq;
p += rtw89_debug_priv_txpwr_table_get_regd(rtwdev, p, end - p, chan);
p += scnprintf(p, end - p, "[SAR]\n");
- p += rtw89_print_sar(rtwdev, p, end - p, chan->freq);
+ p += rtw89_print_sar(rtwdev, p, end - p, &sar_parm);
p += scnprintf(p, end - p, "[TAS]\n");
p += rtw89_print_tas(rtwdev, p, end - p);
@@ -3993,14 +3972,16 @@ static int rtw89_dump_pkt_offload(char *buf, size_t bufsz, struct list_head *pkt
static int rtw89_vif_link_ids_get(struct rtw89_dev *rtwdev,
char *buf, size_t bufsz, u8 *mac,
- struct rtw89_vif_link *rtwvif_link)
+ struct rtw89_vif_link *rtwvif_link,
+ bool designated)
{
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam;
char *p = buf, *end = buf + bufsz;
p += scnprintf(p, end - p, " [%u] %pM\n", rtwvif_link->mac_id,
rtwvif_link->mac_addr);
- p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwvif_link->link_id);
+ p += scnprintf(p, end - p, "\tlink_id=%u%s\n", rtwvif_link->link_id,
+ designated ? " (*)" : "");
p += scnprintf(p, end - p, "\tbssid_cam_idx=%u\n",
bssid_cam->bssid_cam_idx);
p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwvif_link->addr_cam);
@@ -4017,15 +3998,19 @@ void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
(struct rtw89_debugfs_iter_data *)data;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_vif_link *designated_link;
struct rtw89_vif_link *rtwvif_link;
size_t bufsz = iter_data->bufsz;
char *buf = iter_data->buf;
char *p = buf, *end = buf + bufsz;
unsigned int link_id;
+ designated_link = rtw89_get_designated_link(rtwvif);
+
p += scnprintf(p, end - p, "VIF %pM\n", rtwvif->mac_addr);
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link);
+ p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link,
+ rtwvif_link == designated_link);
rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
@@ -4055,7 +4040,8 @@ static int rtw89_dump_ba_cam(struct rtw89_dev *rtwdev,
static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev,
char *buf, size_t bufsz,
- struct rtw89_sta_link *rtwsta_link)
+ struct rtw89_sta_link *rtwsta_link,
+ bool designated)
{
struct ieee80211_link_sta *link_sta;
char *p = buf, *end = buf + bufsz;
@@ -4069,7 +4055,8 @@ static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev,
rcu_read_unlock();
- p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwsta_link->link_id);
+ p += scnprintf(p, end - p, "\tlink_id=%u%s\n", rtwsta_link->link_id,
+ designated ? " (*)" : "");
p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwsta_link->addr_cam);
p += rtw89_dump_ba_cam(rtwdev, p, end - p, rtwsta_link);
@@ -4082,16 +4069,20 @@ static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
(struct rtw89_debugfs_iter_data *)data;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_sta_link *designated_link;
struct rtw89_sta_link *rtwsta_link;
size_t bufsz = iter_data->bufsz;
char *buf = iter_data->buf;
char *p = buf, *end = buf + bufsz;
unsigned int link_id;
+ designated_link = rtw89_get_designated_link(rtwsta);
+
p += scnprintf(p, end - p, "STA %pM %s\n", sta->addr,
sta->tdls ? "(TDLS)" : "");
rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
- p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link);
+ p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link,
+ rtwsta_link == designated_link);
rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
@@ -4146,6 +4137,35 @@ static ssize_t rtw89_debug_priv_stations_get(struct rtw89_dev *rtwdev,
return p - buf;
}
+static void rtw89_debug_disable_dm_cfg_bmap(struct rtw89_dev *rtwdev, u32 new)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 old = hal->disabled_dm_bitmap;
+
+ if (new == old)
+ return;
+
+ hal->disabled_dm_bitmap = new;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "Disable DM: 0x%x -> 0x%x\n", old, new);
+}
+
+static void rtw89_debug_disable_dm_set_flag(struct rtw89_dev *rtwdev, u8 flag)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 cur = hal->disabled_dm_bitmap;
+
+ rtw89_debug_disable_dm_cfg_bmap(rtwdev, cur | BIT(flag));
+}
+
+static void rtw89_debug_disable_dm_clr_flag(struct rtw89_dev *rtwdev, u8 flag)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 cur = hal->disabled_dm_bitmap;
+
+ rtw89_debug_disable_dm_cfg_bmap(rtwdev, cur & ~BIT(flag));
+}
+
#define DM_INFO(type) {RTW89_DM_ ## type, #type}
static const struct rtw89_disabled_dm_info {
@@ -4155,6 +4175,7 @@ static const struct rtw89_disabled_dm_info {
DM_INFO(DYNAMIC_EDCCA),
DM_INFO(THERMAL_PROTECT),
DM_INFO(TAS),
+ DM_INFO(MLO),
};
static ssize_t
@@ -4188,7 +4209,6 @@ rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev,
struct rtw89_debugfs_priv *debugfs_priv,
const char *buf, size_t count)
{
- struct rtw89_hal *hal = &rtwdev->hal;
u32 conf;
int ret;
@@ -4196,7 +4216,83 @@ rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev,
if (ret)
return -EINVAL;
- hal->disabled_dm_bitmap = conf;
+ rtw89_debug_disable_dm_cfg_bmap(rtwdev, conf);
+
+ return count;
+}
+
+static void rtw89_debug_mlo_mode_set_mlsr(struct rtw89_dev *rtwdev,
+ unsigned int link_id)
+{
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ vif = rtwvif_to_vif(rtwvif);
+ if (!ieee80211_vif_is_mld(vif))
+ continue;
+
+ rtw89_core_mlsr_switch(rtwdev, rtwvif, link_id);
+ }
+}
+
+static ssize_t
+rtw89_debug_priv_mlo_mode_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
+{
+ bool mlo_dm_dis = rtwdev->hal.disabled_dm_bitmap & BIT(RTW89_DM_MLO);
+ char *p = buf, *end = buf + bufsz;
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+ int count = 0;
+
+ p += scnprintf(p, end - p, "MLD(s) status: (MLO DM: %s)\n",
+ str_disable_enable(mlo_dm_dis));
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ vif = rtwvif_to_vif(rtwvif);
+ if (!ieee80211_vif_is_mld(vif))
+ continue;
+
+ p += scnprintf(p, end - p,
+ "\t#%u: MLO mode %x, valid 0x%x, active 0x%x\n",
+ count++, rtwvif->mlo_mode, vif->valid_links,
+ vif->active_links);
+ }
+
+ if (count == 0)
+ p += scnprintf(p, end - p, "\t(None)\n");
+
+ return p - buf;
+}
+
+static ssize_t
+rtw89_debug_priv_mlo_mode_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
+{
+ u8 num, mlo_mode;
+ u32 argv;
+
+ num = sscanf(buf, "%hhx %u", &mlo_mode, &argv);
+ if (num != 2)
+ return -EINVAL;
+
+ rtw89_debug_disable_dm_set_flag(rtwdev, RTW89_DM_MLO);
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "Set MLO mode to %x\n", mlo_mode);
+
+ switch (mlo_mode) {
+ case RTW89_MLO_MODE_MLSR:
+ rtw89_debug_mlo_mode_set_mlsr(rtwdev, argv);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "Unsupported MLO mode\n");
+ rtw89_debug_disable_dm_clr_flag(rtwdev, RTW89_DM_MLO);
+
+ return -EOPNOTSUPP;
+ }
return count;
}
@@ -4257,7 +4353,8 @@ static const struct rtw89_debugfs rtw89_debugfs_templ = {
.fw_log_manual = rtw89_debug_priv_set(fw_log_manual, WLOCK),
.phy_info = rtw89_debug_priv_get(phy_info),
.stations = rtw89_debug_priv_get(stations, RLOCK),
- .disable_dm = rtw89_debug_priv_set_and_get(disable_dm),
+ .disable_dm = rtw89_debug_priv_set_and_get(disable_dm, RWLOCK),
+ .mlo_mode = rtw89_debug_priv_set_and_get(mlo_mode, RWLOCK),
};
#define rtw89_debugfs_add(name, mode, fopname, parent) \
@@ -4302,6 +4399,7 @@ void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_top
rtw89_debugfs_add_r(phy_info);
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
+ rtw89_debugfs_add_rw(mlo_mode);
}
void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
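One usability note on the new mlo_mode entry, inferred from the set handler above: the mode token is parsed as hex ("%hhx") and the argument as decimal. A hypothetical user-space write follows; the debugfs path and the numeric MLSR value are assumptions, not taken from the patch.

	#include <stdio.h>

	int main(void)
	{
		/* Assumed debugfs location; request mode 0x1 for link_id 0.
		 * Whether 0x1 equals RTW89_MLO_MODE_MLSR depends on the enum
		 * value, which this diff does not show.
		 */
		FILE *f = fopen("/sys/kernel/debug/ieee80211/phy0/rtw89/mlo_mode", "w");

		if (!f)
			return 1;
		fprintf(f, "%x %u\n", 0x1, 0);
		fclose(f);
		return 0;
	}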
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 8643b17866f8..00b65b2995cf 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -15,6 +15,8 @@
#include "util.h"
#include "wow.h"
+static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev);
+
struct rtw89_eapol_2_of_2 {
u8 gtkbody[14];
u8 key_des_ver;
@@ -554,7 +556,7 @@ const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
if (sizeof(*mfw_hdr) > firmware->size)
return NULL;
- mfw_hdr = (const struct rtw89_mfw_hdr *)firmware->data;
+ mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0];
if (mfw_hdr->sig != RTW89_MFW_SIG)
return NULL;
@@ -850,6 +852,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -1018,7 +1021,7 @@ int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
}
n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
- regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
+ regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL);
if (!regs)
goto out;
@@ -1298,6 +1301,18 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
},
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL,
+ },
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL,
+ },
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL,
+ },
[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
@@ -1310,6 +1325,18 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
},
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL,
+ },
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL,
+ },
+ [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = {
+ rtw89_fw_recognize_txpwr_from_elm,
+ { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL,
+ },
[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
@@ -2483,7 +2510,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
if (enable)
comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
- BIT(RTW89_FW_LOG_COMP_SCAN);
+ BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
@@ -3065,8 +3092,8 @@ static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
ntx_path = RF_A;
map_b = 0;
} else {
- ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
- map_b = hal->antenna_tx == RF_AB ? 1 : 0;
+ ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB;
+ map_b = ntx_path == RF_AB ? 1 : 0;
}
SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
@@ -4004,8 +4031,9 @@ out:
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link, bool dis_conn)
{
- struct sk_buff *skb;
u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ bool is_mld = ieee80211_vif_is_mld(vif);
u8 self_role = rtwvif_link->self_role;
enum rtw89_fw_sta_type sta_type;
u8 net_type = rtwvif_link->net_type;
@@ -4013,6 +4041,9 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
struct rtw89_h2c_join *h2c;
u32 len = sizeof(*h2c);
bool format_v1 = false;
+ struct sk_buff *skb;
+ u8 main_mac_id;
+ bool init_ps;
int ret;
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
@@ -4054,8 +4085,28 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
- h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
+ init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif);
+
+ if (rtwsta_link)
+ main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
+ else
+ main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif);
+
+ h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) |
+ le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) |
+ le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) |
+ le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR,
+ RTW89_H2C_JOININFO_W1_MLO_MODE) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) |
+ le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) |
+ le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US,
+ RTW89_H2C_JOININFO_W1_EMLSR_PADDING) |
+ le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US,
+ RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT);
h2c_v1->w2 = 0;
done:
@@ -4339,6 +4390,7 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
struct rtw89_h2c_bcnfltr *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
+ u8 max_cnt, cnt;
int ret;
if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
@@ -4367,12 +4419,20 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
+ if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw))
+ max_cnt = BIT(7) - 1;
+ else
+ max_cnt = BIT(4) - 1;
+
+ cnt = min(RTW89_BCN_LOSS_CNT, max_cnt);
+
h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
RTW89_H2C_BCNFLTR_W0_MODE) |
- le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
+ le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) |
+ le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) |
le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
le32_encode_bits(thold + MAX_RSSI,
RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
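The count split is easy to misread, so a worked example (consistent with the H3/L4 masks added to fw.h later in this patch): the seven-bit loss count carries its high three bits in W0[7:5] and its low four bits in W0[11:8].

	/* Illustrative only: with BEACON_LOSS_COUNT_V1, max_cnt = BIT(7) - 1
	 * = 127, so cnt = min(60, 127) = 60 = 0x3c;
	 *   H3 gets 60 >> 4  = 0x3 (cnt bits 6..4)
	 *   L4 gets 60 & 0xf = 0xc (cnt bits 3..0)
	 * Firmware without the feature keeps only the L4 field, capped at 15.
	 */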
@@ -5298,12 +5358,12 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
}
static
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
- struct list_head *chan_list)
+int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list)
{
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_h2c_chinfo_elem *elem;
- struct rtw89_mac_chinfo *ch_info;
+ struct rtw89_mac_chinfo_ax *ch_info;
struct rtw89_h2c_chinfo *h2c;
struct sk_buff *skb;
unsigned int cond;
@@ -5477,7 +5537,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return 0;
}
-#define RTW89_SCAN_DELAY_TSF_UNIT 104800
+#define RTW89_SCAN_DELAY_TSF_UNIT 1000000
int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
struct rtw89_vif_link *rtwvif_link,
@@ -5741,7 +5801,7 @@ flex_member:
RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
le32_encode_bits(0,
RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
- le32_encode_bits(2,
+ le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 1 : 2,
RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
@@ -6525,7 +6585,7 @@ void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
rtw89_fw_prog_cnt_dump(rtwdev);
}
-static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
+static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev)
{
struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
struct rtw89_pktofld_info *info, *tmp;
@@ -6544,6 +6604,23 @@ static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
}
}
+static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+
+ mac->free_chan_list(rtwdev);
+ rtw89_hw_scan_release_pkt_list(rtwdev);
+
+ rtwvif->scan_req = NULL;
+ rtwvif->scan_ies = NULL;
+ scan_info->scanning_vif = NULL;
+ scan_info->abort = false;
+ scan_info->connected = false;
+}
+
static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
struct cfg80211_scan_request *req,
struct rtw89_pktofld_info *info,
@@ -6612,7 +6689,8 @@ out:
}
static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+ struct rtw89_vif_link *rtwvif_link,
+ const u8 *mac_addr)
{
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct cfg80211_scan_request *req = rtwvif->scan_req;
@@ -6621,7 +6699,7 @@ static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
int ret;
for (i = 0; i < num; i++) {
- skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
+ skb = ieee80211_probereq_get(rtwdev->hw, mac_addr,
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
@@ -6638,10 +6716,10 @@ static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
return 0;
}
-static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
- struct ieee80211_scan_ies *ies,
- struct cfg80211_scan_request *req,
- struct rtw89_mac_chinfo *ch_info)
+static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev,
+ struct ieee80211_scan_ies *ies,
+ struct cfg80211_scan_request *req,
+ struct rtw89_mac_chinfo_ax *ch_info)
{
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
@@ -6713,7 +6791,7 @@ out:
static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
int chan_type, int ssid_num,
- struct rtw89_mac_chinfo *ch_info)
+ struct rtw89_mac_chinfo_ax *ch_info)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_pktofld_info *info;
@@ -6761,9 +6839,9 @@ static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
- int ssid_num,
- struct rtw89_mac_chinfo *ch_info)
+static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo_ax *ch_info)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
@@ -6794,7 +6872,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
}
}
- ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info);
+ ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info);
if (ret)
rtw89_warn(rtwdev, "RNR fails: %d\n", ret);
@@ -6950,7 +7028,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
- struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct rtw89_mac_chinfo_ax *ch_info, *tmp;
struct ieee80211_channel *channel;
struct list_head chan_list;
int list_len;
@@ -6984,7 +7062,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
list_add_tail(&ch_info->list, &chan_list);
}
- ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+ ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list);
out:
list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
@@ -6995,24 +7073,24 @@ out:
return ret;
}
-int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected)
+int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct cfg80211_scan_request *req = rtwvif->scan_req;
- struct rtw89_mac_chinfo *ch_info, *tmp;
+ struct rtw89_mac_chinfo_ax *ch_info, *tmp;
struct ieee80211_channel *channel;
struct list_head chan_list;
bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
- int list_len, off_chan_time = 0;
enum rtw89_chan_type type;
- int ret = 0;
+ int off_chan_time = 0;
+ int ret;
u32 idx;
INIT_LIST_HEAD(&chan_list);
- for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
- idx++, list_len++) {
+
+ for (idx = 0; idx < req->n_channels; idx++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
if (!ch_info) {
@@ -7039,9 +7117,9 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
type = RTW89_CHAN_DFS;
else
type = RTW89_CHAN_ACTIVE;
- rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
+ rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info);
- if (connected &&
+ if (scan_info->connected &&
off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
@@ -7053,16 +7131,16 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
type = RTW89_CHAN_OPERATE;
tmp->period = req->duration_mandatory ?
req->duration : RTW89_CHANNEL_TIME;
- rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
+ rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp);
list_add_tail(&tmp->list, &chan_list);
off_chan_time = 0;
- list_len++;
}
list_add_tail(&ch_info->list, &chan_list);
off_chan_time += ch_info->period;
}
- rtwdev->scan_info.last_chan_idx = idx;
- ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
+
+ list_splice_tail(&chan_list, &scan_info->chan_list);
+ return 0;
out:
list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
@@ -7073,6 +7151,46 @@ out:
return ret;
}
+void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_mac_chinfo_ax *ch_info, *tmp;
+
+ list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+}
+
+int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_mac_chinfo_ax *ch_info, *tmp;
+ unsigned int list_len = 0;
+ struct list_head list;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+
+ list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ list_move_tail(&ch_info->list, &list);
+
+ list_len++;
+ if (list_len == RTW89_SCAN_LIST_LIMIT_AX)
+ break;
+ }
+
+ ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list);
+
+ list_for_each_entry_safe(ch_info, tmp, &list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
@@ -7126,25 +7244,24 @@ out:
return ret;
}
-int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected)
+int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo_be *ch_info, *tmp;
struct ieee80211_channel *channel;
struct list_head chan_list;
enum rtw89_chan_type type;
- int list_len, ret;
bool random_seq;
+ int ret;
u32 idx;
random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
INIT_LIST_HEAD(&chan_list);
- for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
- idx++, list_len++) {
+ for (idx = 0; idx < req->n_channels; idx++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
if (!ch_info) {
@@ -7174,9 +7291,8 @@ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
list_add_tail(&ch_info->list, &chan_list);
}
- rtwdev->scan_info.last_chan_idx = idx;
- ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
- rtwvif_link);
+ list_splice_tail(&chan_list, &scan_info->chan_list);
+ return 0;
out:
list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
@@ -7187,51 +7303,182 @@ out:
return ret;
}
+void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+
+ list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+}
+
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ unsigned int list_len = 0;
+ struct list_head list;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+
+ list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
+ list_move_tail(&ch_info->list, &list);
+
+ list_len++;
+ if (list_len == RTW89_SCAN_LIST_LIMIT_BE)
+ break;
+ }
+
+ ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list,
+ rtwvif_link);
+
+ list_for_each_entry_safe(ch_info, tmp, &list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
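Stepping back, the hunks above compose into this flow (an inference from the patch, not a verbatim excerpt):

	/*
	 * rtw89_hw_scan_start()
	 *   -> rtw89_hw_scan_prehandle() -> mac->prep_chan_list()
	 *      builds the complete channel list once into scan_info->chan_list
	 * rtw89_hw_scan_offload(enable=true)
	 *   -> mac->add_chan_list() sends at most RTW89_SCAN_LIST_LIMIT_*
	 *      entries per firmware call, freeing each chunk after the H2C
	 * rtw89_mac_c2h_scanofld_rsp()
	 *   -> re-arms the offload while scan_info.chan_list is non-empty
	 * rtw89_hw_scan_complete() / abort
	 *   -> rtw89_hw_scan_cleanup() -> mac->free_chan_list() drops leftovers
	 */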
static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected)
+ struct rtw89_vif_link *rtwvif_link,
+ const u8 *mac_addr)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
- ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link);
+ ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr);
if (ret) {
rtw89_err(rtwdev, "Update probe request failed\n");
goto out;
}
- ret = mac->add_chan_list(rtwdev, rtwvif_link, connected);
+ ret = mac->prep_chan_list(rtwdev, rtwvif_link);
out:
return ret;
}
-void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- struct ieee80211_scan_request *scan_req)
+static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ u16 tu)
+{
+ struct ieee80211_p2p_noa_desc noa_desc = {};
+ u64 tsf;
+ int ret;
+
+ ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
+ if (ret) {
+ rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__);
+ return;
+ }
+
+ noa_desc.start_time = cpu_to_le32(tsf);
+ noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu));
+ noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu));
+ noa_desc.count = 1;
+
+ rtw89_p2p_noa_renew(rtwvif_link);
+ rtw89_p2p_noa_append(rtwvif_link, &noa_desc);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
+}
+
+static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev,
+ const struct cfg80211_scan_request *req)
+{
+ const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
+ const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_mac_chinfo_ax *chinfo_ax;
+ struct rtw89_mac_chinfo_be *chinfo_be;
+ struct rtw89_vif_link *rtwvif_link;
+ struct list_head *pos, *tmp;
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+ u16 tu = 0;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ list_for_each_safe(pos, tmp, &scan_info->chan_list) {
+ switch (chip->chip_gen) {
+ case RTW89_CHIP_AX:
+ chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list);
+ tu += chinfo_ax->period;
+ break;
+ case RTW89_CHIP_BE:
+ chinfo_be = list_entry(pos, typeof(*chinfo_be), list);
+ tu += chinfo_be->period;
+ break;
+ default:
+ rtw89_warn(rtwdev, "%s: invalid chip gen %d\n",
+ __func__, chip->chip_gen);
+ return;
+ }
+ }
+
+ if (unlikely(tu == 0)) {
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "%s: cannot estimate needed TU\n", __func__);
+ return;
+ }
+
+ list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) {
+ unsigned int link_id;
+
+ vif = rtwvif_to_vif(rtwvif);
+ if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
+ continue;
+
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
+ rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, tu);
+ }
+}
+
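The inferred intent of the NoA update above: before scanning under MCC, each P2P-GO link advertises a one-shot absence covering the whole scan.

	/* Sketch of the math, not a verbatim excerpt: tu accumulates the
	 * per-channel dwell periods queued in scan_info->chan_list, and the
	 * NoA descriptor starts at the current TSF with count == 1, so GO
	 * clients do not expect the GO on-channel while the scan runs.
	 */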
+int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_scan_request *scan_req)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
struct cfg80211_scan_request *req = &scan_req->req;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_chanctx_pause_parm pause_parm = {
+ .rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
+ .trigger = rtwvif_link,
+ };
u32 rx_fltr = rtwdev->hal.rx_fltr;
u8 mac_addr[ETH_ALEN];
u32 reg;
+ int ret;
/* clone op and keep it during scan */
rtwdev->scan_info.op_chan = *chan;
+ rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
rtwdev->scan_info.scanning_vif = rtwvif_link;
- rtwdev->scan_info.last_chan_idx = 0;
rtwdev->scan_info.abort = false;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
- ieee80211_stop_queues(rtwdev->hw);
- rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
req->mac_addr_mask);
else
ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
+
+ ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr);
+ if (ret) {
+ rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
+ return ret;
+ }
+
+ ieee80211_stop_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
+
rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);
rx_fltr &= ~B_AX_A_BCN_CHK_EN;
@@ -7241,7 +7488,12 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);
- rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
+ rtw89_chanctx_pause(rtwdev, &pause_parm);
+
+ if (mode == RTW89_ENTITY_MODE_MCC)
+ rtw89_hw_scan_update_beacon_noa(rtwdev, req);
+
+ return 0;
}
struct rtw89_hw_scan_complete_cb_data {
@@ -7252,20 +7504,16 @@ struct rtw89_hw_scan_complete_cb_data {
static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_hw_scan_complete_cb_data *cb_data = data;
struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
struct cfg80211_scan_info info = {
.aborted = cb_data->aborted,
};
- struct rtw89_vif *rtwvif;
u32 reg;
if (!rtwvif_link)
return -EINVAL;
- rtwvif = rtwvif_link->rtwvif;
-
reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
@@ -7275,12 +7523,7 @@ static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
- rtw89_release_pkt_list(rtwdev);
- rtwvif->scan_req = NULL;
- rtwvif->scan_ies = NULL;
- scan_info->last_chan_idx = 0;
- scan_info->scanning_vif = NULL;
- scan_info->abort = false;
+ rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
return 0;
}
@@ -7355,11 +7598,11 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
if (!rtwvif_link)
return -EINVAL;
- connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
+ connected = rtwdev->scan_info.connected;
opt.enable = enable;
opt.target_ch_mode = connected;
if (enable) {
- ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected);
+ ret = mac->add_chan_list(rtwdev, rtwvif_link);
if (ret)
goto out;
}
@@ -8719,6 +8962,47 @@ int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
return 0;
}
+int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ bool enable)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
+ struct rtw89_h2c_mlo_link_cfg *h2c;
+ u8 mac_id = rtwvif_link->mac_id;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data;
+
+ h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) |
+ le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MLO,
+ H2C_FUNC_MLO_LINK_CFG, 0, 0,
+ len);
+
+ cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n",
+ str_enable_disable(enable), rtwvif_link->link_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
@@ -9106,6 +9390,26 @@ void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
}
}
+static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev,
+ const struct rtw89_rfe_parms *parms)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->support_bands & BIT(NL80211_BAND_2GHZ) &&
+ !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru))
+ return false;
+
+ if (chip->support_bands & BIT(NL80211_BAND_5GHZ) &&
+ !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru))
+ return false;
+
+ if (chip->support_bands & BIT(NL80211_BAND_6GHZ) &&
+ !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru))
+ return false;
+
+ return true;
+}
+
const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
const struct rtw89_rfe_parms *init)
@@ -9142,6 +9446,21 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
}
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz);
+ parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v;
+ }
+
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz);
+ parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v;
+ }
+
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz);
+ parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v;
+ }
+
if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
@@ -9157,6 +9476,21 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
}
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz);
+ parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v;
+ }
+
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz);
+ parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v;
+ }
+
+ if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) {
+ rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz);
+ parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v;
+ }
+
if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
@@ -9167,5 +9501,7 @@ rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
}
+ parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms);
+
return parms;
}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 55255b48bdb7..0fcc824e41be 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -199,6 +199,7 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_TWT,
RTW89_FW_LOG_COMP_RF,
RTW89_FW_LOG_COMP_MCC = 20,
+ RTW89_FW_LOG_COMP_MLO = 26,
RTW89_FW_LOG_COMP_SCAN = 28,
};
@@ -333,9 +334,9 @@ struct rtw89_fw_macid_pause_sleep_grp {
#define RTW89_SCAN_LIST_LIMIT_AX RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE)
#define RTW89_SCAN_LIST_LIMIT_BE RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE_BE)
-#define RTW89_BCN_LOSS_CNT 10
+#define RTW89_BCN_LOSS_CNT 60
-struct rtw89_mac_chinfo {
+struct rtw89_mac_chinfo_ax {
u8 period;
u8 dwell_time;
u8 central_ch;
@@ -1636,6 +1637,8 @@ struct rtw89_h2c_join_v1 {
#define RTW89_H2C_JOININFO_W1_IS_MLD BIT(3)
#define RTW89_H2C_JOININFO_W1_MAIN_MACID GENMASK(11, 4)
#define RTW89_H2C_JOININFO_W1_MLO_MODE BIT(12)
+#define RTW89_H2C_JOININFO_MLO_MODE_MLMR 0
+#define RTW89_H2C_JOININFO_MLO_MODE_MLSR 1
#define RTW89_H2C_JOININFO_W1_EMLSR_CAB BIT(13)
#define RTW89_H2C_JOININFO_W1_NSTR_EN BIT(14)
#define RTW89_H2C_JOININFO_W1_INIT_PWR_STATE BIT(15)
@@ -2863,6 +2866,13 @@ struct rtw89_h2c_fwips {
#define RTW89_H2C_FW_IPS_W0_MACID GENMASK(7, 0)
#define RTW89_H2C_FW_IPS_W0_ENABLE BIT(8)
+struct rtw89_h2c_mlo_link_cfg {
+ __le32 w0;
+};
+
+#define RTW89_H2C_MLO_LINK_CFG_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_MLO_LINK_CFG_W0_OPTION GENMASK(19, 16)
+
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
@@ -3562,6 +3572,7 @@ struct rtw89_c2h_done_ack {
#define RTW89_C2H_DONE_ACK_W2_CLASS GENMASK(7, 2)
#define RTW89_C2H_DONE_ACK_W2_FUNC GENMASK(15, 8)
#define RTW89_C2H_DONE_ACK_W2_H2C_RETURN GENMASK(23, 16)
+#define RTW89_C2H_SCAN_DONE_ACK_RETURN GENMASK(5, 0)
#define RTW89_C2H_DONE_ACK_W2_H2C_SEQ GENMASK(31, 24)
#define RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h) \
@@ -3620,6 +3631,19 @@ struct rtw89_c2h_ra_rpt {
#define RTW89_C2H_RA_RPT_W3_MD_SEL_B2 BIT(15)
#define RTW89_C2H_RA_RPT_W3_BW_B2 BIT(16)
+struct rtw89_c2h_fw_scan_rpt {
+ struct rtw89_c2h_hdr hdr;
+ u8 phy_idx;
+ u8 band;
+ u8 center_ch;
+ u8 ofdm_pd_idx; /* in unit of 2 dBm */
+#define PD_LOWER_BOUND_BASE 102
+ s8 cck_pd_idx;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+} __packed;
+
/* For WiFi 6 chips:
* VHT, HE, HT-old: [6:4]: NSS, [3:0]: MCS
* HT-new: [6:5]: NA, [4:0]: MCS
@@ -3717,6 +3741,25 @@ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE)
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+struct rtw89_c2h_mlo_link_cfg_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+} __packed;
+
+#define RTW89_C2H_MLO_LINK_CFG_RPT_W2_MACID GENMASK(15, 0)
+#define RTW89_C2H_MLO_LINK_CFG_RPT_W2_STATUS GENMASK(19, 16)
+
+enum rtw89_c2h_mlo_link_status {
+ RTW89_C2H_MLO_LINK_CFG_IDLE = 0,
+ RTW89_C2H_MLO_LINK_CFG_DONE = 1,
+ RTW89_C2H_MLO_LINK_CFG_ISSUE_NULL_FAIL = 2,
+ RTW89_C2H_MLO_LINK_CFG_TX_NULL_FAIL = 3,
+ RTW89_C2H_MLO_LINK_CFG_ROLE_NOT_EXIST = 4,
+ RTW89_C2H_MLO_LINK_CFG_NULL_1_TIMEOUT = 5,
+ RTW89_C2H_MLO_LINK_CFG_NULL_0_TIMEOUT = 6,
+ RTW89_C2H_MLO_LINK_CFG_RUNNING = 0xff,
+};
+
struct rtw89_mac_mrc_tsf_rpt {
unsigned int num;
u64 tsfs[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
@@ -3813,7 +3856,8 @@ struct rtw89_h2c_bcnfltr {
#define RTW89_H2C_BCNFLTR_W0_MON_BCN BIT(1)
#define RTW89_H2C_BCNFLTR_W0_MON_EN BIT(2)
#define RTW89_H2C_BCNFLTR_W0_MODE GENMASK(4, 3)
-#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT GENMASK(11, 8)
+#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3 GENMASK(7, 5)
+#define RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4 GENMASK(11, 8)
#define RTW89_H2C_BCNFLTR_W0_RSSI_HYST GENMASK(15, 12)
#define RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD GENMASK(23, 16)
#define RTW89_H2C_BCNFLTR_W0_MAC_ID GENMASK(31, 24)
@@ -3891,6 +3935,12 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TXPWR_TRK = 18,
RTW89_FW_ELEMENT_ID_RFKLOG_FMT = 19,
RTW89_FW_ELEMENT_ID_REGD = 20,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ = 21,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ = 22,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ = 23,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ = 24,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ = 25,
+ RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ = 26,
RTW89_FW_ELEMENT_ID_NUM,
};
@@ -4229,6 +4279,26 @@ enum rtw89_mcc_h2c_func {
#define RTW89_MCC_WAIT_COND(group, func) \
((group) * NUM_OF_RTW89_MCC_H2C_FUNC + (func))
+/* CLASS 20 - MLO */
+#define H2C_CL_MLO 0x14
+enum rtw89_mlo_h2c_func {
+ H2C_FUNC_MLO_TBL_CFG = 0x0,
+ H2C_FUNC_MLO_STA_CFG = 0x1,
+ H2C_FUNC_MLO_TTLM = 0x2,
+ H2C_FUNC_MLO_DM_CFG = 0x3,
+ H2C_FUNC_MLO_EMLSR_STA_CFG = 0x4,
+ H2C_FUNC_MLO_MCMLO_RELINK_DROP = 0x5,
+ H2C_FUNC_MLO_MCMLO_SN_SYNC = 0x6,
+ H2C_FUNC_MLO_RELINK = 0x7,
+ H2C_FUNC_MLO_LINK_CFG = 0x8,
+ H2C_FUNC_MLO_DM_DBG = 0x9,
+
+ NUM_OF_RTW89_MLO_H2C_FUNC,
+};
+
+#define RTW89_MLO_WAIT_COND(macid, func) \
+ ((macid) * NUM_OF_RTW89_MLO_H2C_FUNC + (func))
+
/* CLASS 24 - MRC */
#define H2C_CL_MRC 0x18
enum rtw89_mrc_h2c_func {
@@ -4715,9 +4785,9 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *c2h_info);
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
-void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- struct ieee80211_scan_request *scan_req);
+int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_scan_request *scan_req);
void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
bool aborted);
@@ -4726,12 +4796,18 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
+void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev);
int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected);
+ struct rtw89_vif_link *rtwvif_link);
int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
+void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev);
int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected);
+ struct rtw89_vif_link *rtwvif_link);
int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
@@ -4800,6 +4876,8 @@ int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_upd_duration_arg *arg);
int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en);
+int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ bool enable);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index b4841f948ec1..9f0e30e75009 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -4631,11 +4631,17 @@ static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev,
if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif_link == rtwvif_src)
return;
+ if (rtwvif_link->rand_tsf_done)
+ goto out;
+
/* adjust offset randomly to avoid beacon conflict */
offset = offset - offset / 4 + get_random_u32() % (offset / 2);
rtw89_mac_port_tsf_sync(rtwdev, rtwvif_link, rtwvif_src,
(*n_offset) * offset);
+ rtwvif_link->rand_tsf_done = true;
+
+out:
(*n_offset)++;
}
@@ -4866,6 +4872,8 @@ void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link);
+
+ rtwvif_link->rand_tsf_done = false;
}
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
@@ -4899,11 +4907,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif;
struct rtw89_chan new;
- u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
u16 actual_period, expect_period;
u8 reason, status, tx_fail, band;
u8 mac_idx, sw_def, fw_def;
u8 ver = U8_MAX;
+ u32 report_tsf;
u16 chan;
int ret;
@@ -4962,7 +4970,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
if (rtwvif_link && rtwvif->scan_req &&
- last_chan < rtwvif->scan_req->n_channels) {
+ !list_empty(&rtwdev->scan_info.chan_list)) {
ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true);
if (ret) {
rtw89_hw_scan_abort(rtwdev, rtwvif_link);
@@ -5017,7 +5025,8 @@ rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
switch (type) {
case RTW89_BCN_FLTR_BEACON_LOSS:
- if (!rtwdev->scanning && !rtwvif->offchan)
+ if (!rtwdev->scanning && !rtwvif->offchan &&
+ !rtwvif_link->noa_once.in_duration)
ieee80211_connection_loss(vif);
else
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
@@ -5110,12 +5119,14 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
return;
case H2C_FUNC_ADD_SCANOFLD_CH:
cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+ h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN;
break;
case H2C_FUNC_SCANOFLD:
cond = RTW89_SCANOFLD_WAIT_COND_START;
break;
case H2C_FUNC_SCANOFLD_BE:
cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
+ h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN;
break;
}
@@ -5399,6 +5410,27 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
}
static void
+rtw89_mac_c2h_mlo_link_cfg_stat(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ const struct rtw89_c2h_mlo_link_cfg_rpt *c2h_rpt;
+ struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+ u16 mac_id;
+ u8 status;
+
+ c2h_rpt = (const struct rtw89_c2h_mlo_link_cfg_rpt *)c2h->data;
+
+ mac_id = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MLO_LINK_CFG_RPT_W2_MACID);
+ status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MLO_LINK_CFG_RPT_W2_STATUS);
+
+ data.err = status == RTW89_C2H_MLO_LINK_CFG_ROLE_NOT_EXIST ||
+ status == RTW89_C2H_MLO_LINK_CFG_RUNNING;
+ cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);
+ rtw89_complete_cond(wait, cond, &data);
+}
+
+static void
rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
@@ -5553,6 +5585,18 @@ void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev,
};
static
+void (* const rtw89_mac_c2h_mlo_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_MLO_GET_TBL] = NULL,
+ [RTW89_MAC_C2H_FUNC_MLO_EMLSR_TRANS_DONE] = NULL,
+ [RTW89_MAC_C2H_FUNC_MLO_EMLSR_STA_CFG_DONE] = NULL,
+ [RTW89_MAC_C2H_FUNC_MCMLO_RELINK_RPT] = NULL,
+ [RTW89_MAC_C2H_FUNC_MCMLO_SN_SYNC_RPT] = NULL,
+ [RTW89_MAC_C2H_FUNC_MLO_LINK_CFG_STAT] = rtw89_mac_c2h_mlo_link_cfg_stat,
+ [RTW89_MAC_C2H_FUNC_MLO_DM_DBG_DUMP] = NULL,
+};
+
+static
void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_MAC_C2H_FUNC_MRC_TSF_RPT] = rtw89_mac_c2h_mrc_tsf_rpt,
@@ -5621,6 +5665,8 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
}
case RTW89_MAC_C2H_CLASS_MCC:
return true;
+ case RTW89_MAC_C2H_CLASS_MLO:
+ return true;
case RTW89_MAC_C2H_CLASS_MRC:
return true;
case RTW89_MAC_C2H_CLASS_WOW:
@@ -5654,6 +5700,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC)
handler = rtw89_mac_c2h_mcc_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_MLO:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MLO)
+ handler = rtw89_mac_c2h_mlo_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_MRC:
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC)
handler = rtw89_mac_c2h_mrc_handler[func];
@@ -6889,6 +6939,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.is_txq_empty = mac_is_txq_empty_ax,
+ .prep_chan_list = rtw89_hw_scan_prep_chan_list_ax,
+ .free_chan_list = rtw89_hw_scan_free_chan_list_ax,
.add_chan_list = rtw89_hw_scan_add_chan_list_ax,
.add_chan_list_pno = rtw89_pno_scan_add_chan_list_ax,
.scan_offload = rtw89_fw_h2c_scan_offload_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index fd7935d24501..8013c852d5be 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -370,6 +370,7 @@ enum rtw89_mac_mem_sel {
RTW89_MAC_MEM_TXD_FIFO_0_V1,
RTW89_MAC_MEM_TXD_FIFO_1_V1,
RTW89_MAC_MEM_WD_PAGE,
+ RTW89_MAC_MEM_MLD_TBL,
/* keep last */
RTW89_MAC_MEM_NUM,
@@ -427,6 +428,18 @@ enum rtw89_mac_c2h_mcc_func {
NUM_OF_RTW89_MAC_C2H_FUNC_MCC,
};
+enum rtw89_mac_c2h_mlo_func {
+ RTW89_MAC_C2H_FUNC_MLO_GET_TBL = 0x0,
+ RTW89_MAC_C2H_FUNC_MLO_EMLSR_TRANS_DONE = 0x1,
+ RTW89_MAC_C2H_FUNC_MLO_EMLSR_STA_CFG_DONE = 0x2,
+ RTW89_MAC_C2H_FUNC_MCMLO_RELINK_RPT = 0x3,
+ RTW89_MAC_C2H_FUNC_MCMLO_SN_SYNC_RPT = 0x4,
+ RTW89_MAC_C2H_FUNC_MLO_LINK_CFG_STAT = 0x5,
+ RTW89_MAC_C2H_FUNC_MLO_DM_DBG_DUMP = 0x6,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_MLO,
+};
+
enum rtw89_mac_c2h_mrc_func {
RTW89_MAC_C2H_FUNC_MRC_TSF_RPT = 0,
RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT = 1,
@@ -453,6 +466,7 @@ enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_WOW = 0x3,
RTW89_MAC_C2H_CLASS_MCC = 0x4,
RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
+ RTW89_MAC_C2H_CLASS_MLO = 0xc,
RTW89_MAC_C2H_CLASS_MRC = 0xe,
RTW89_MAC_C2H_CLASS_AP = 0x18,
RTW89_MAC_C2H_CLASS_MAX,
@@ -1031,8 +1045,11 @@ struct rtw89_mac_gen_def {
bool (*is_txq_empty)(struct rtw89_dev *rtwdev);
+ int (*prep_chan_list)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
+ void (*free_chan_list)(struct rtw89_dev *rtwdev);
int (*add_chan_list)(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool connected);
+ struct rtw89_vif_link *rtwvif_link);
int (*add_chan_list_pno)(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int (*scan_offload)(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 4fded07d0bee..a47971003bd4 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -114,11 +114,14 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
wiphy_work_init(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
INIT_LIST_HEAD(&rtwvif_link->general_pkt_list);
+ rtw89_p2p_noa_once_init(rtwvif_link);
+
rtwvif_link->hit_rule = 0;
rtwvif_link->bcn_hit_cond = 0;
rtwvif_link->chanctx_assigned = false;
rtwvif_link->chanctx_idx = RTW89_CHANCTX_0;
rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+ rtwvif_link->rand_tsf_done = false;
rcu_read_lock();
@@ -142,6 +145,8 @@ static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev,
wiphy_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->update_beacon_work);
+ rtw89_p2p_noa_once_deinit(rtwvif_link);
+
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, NULL, BTC_ROLE_STOP);
@@ -186,6 +191,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) {
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
INIT_LIST_HEAD(&rtwvif->mgnt_entry);
+ INIT_LIST_HEAD(&rtwvif->dlink_pool);
}
ether_addr_copy(rtwvif->mac_addr, vif->addr);
@@ -479,6 +485,8 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
int i;
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ rtwvif->mlo_mode = RTW89_MLO_MODE_MLSR;
+
/* for station mode, assign the mac_id from itself */
macid = rtw89_vif_get_main_macid(rtwvif);
} else {
@@ -494,6 +502,8 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
rtw89_core_txq_init(rtwdev, sta->txq[i]);
+ INIT_LIST_HEAD(&rtwsta->dlink_pool);
+
skb_queue_head_init(&rtwsta->roc_queue);
bitmap_zero(rtwsta->pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
@@ -1018,7 +1028,7 @@ static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw,
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_sta_link *rtwsta_link;
- rtwsta_link = rtw89_sta_get_link_inst(rtwsta, 0);
+ rtwsta_link = rtw89_get_designated_link(rtwsta);
if (unlikely(!rtwsta_link))
return;
@@ -1153,12 +1163,14 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
lockdep_assert_wiphy(hw->wiphy);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "sw scan start: find no link on HW-0\n");
+ rtw89_err(rtwdev, "sw scan start: find no designated link\n");
return;
}
+ rtw89_leave_lps(rtwdev);
+
rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, false);
}
@@ -1171,9 +1183,9 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
lockdep_assert_wiphy(hw->wiphy);
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "sw scan complete: find no link on HW-0\n");
+ rtw89_err(rtwdev, "sw scan complete: find no designated link\n");
return;
}
@@ -1205,13 +1217,19 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (rtwdev->scanning || rtwvif->offchan)
return -EBUSY;
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "hw scan: find no link on HW-0\n");
+ rtw89_err(rtwdev, "hw scan: find no designated link\n");
return -ENOLINK;
}
- rtw89_hw_scan_start(rtwdev, rtwvif_link, req);
+ rtw89_leave_lps(rtwdev);
+ rtw89_leave_ips_by_hwflags(rtwdev);
+
+ ret = rtw89_hw_scan_start(rtwdev, rtwvif_link, req);
+ if (ret)
+ return ret;
+
ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true);
if (ret) {
rtw89_hw_scan_abort(rtwdev, rtwvif_link);
@@ -1236,9 +1254,9 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
if (!rtwdev->scanning)
return;
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (unlikely(!rtwvif_link)) {
- rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
+ rtw89_err(rtwdev, "cancel hw scan: find no designated link\n");
return;
}
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 99b82dc85ea3..8c9d326dc907 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -29,6 +29,7 @@ static const u32 rtw89_mac_mem_base_addrs_be[RTW89_MAC_MEM_NUM] = {
[RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR_BE,
[RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR_BE,
[RTW89_MAC_MEM_WD_PAGE] = WD_PAGE_BASE_ADDR_BE,
+ [RTW89_MAC_MEM_MLD_TBL] = MLD_TBL_BASE_ADDR_BE,
};
static const struct rtw89_port_reg rtw89_port_base_be = {
@@ -2636,6 +2637,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.is_txq_empty = mac_is_txq_empty_be,
+ .prep_chan_list = rtw89_hw_scan_prep_chan_list_be,
+ .free_chan_list = rtw89_hw_scan_free_chan_list_be,
.add_chan_list = rtw89_hw_scan_add_chan_list_be,
.add_chan_list_pno = rtw89_pno_scan_add_chan_list_be,
.scan_offload = rtw89_fw_h2c_scan_offload_be,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index c2fe5a898dc7..064f6a940107 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -228,7 +228,7 @@ int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
- int rx_tag_retry = 100;
+ int rx_tag_retry = 1000;
int ret;
do {
@@ -3105,17 +3105,26 @@ static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev)
return false;
}
-static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
+static int rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev, bool force)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
+ u8 val;
- if (!rtwpci->enable_dac)
- return;
+ if (!rtwpci->enable_dac && !force)
+ return 0;
if (!rtw89_pci_chip_is_manual_dac(rtwdev))
- return;
+ return 0;
- rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS);
+ /* Configure DAC only via PCI config API, not DBI interfaces */
+ ret = pci_read_config_byte(pdev, RTW89_PCIE_L1_CTRL, &val);
+ if (ret)
+ return ret;
+
+ val |= RTW89_PCIE_BIT_EN_64BITS;
+ return pci_write_config_byte(pdev, RTW89_PCIE_L1_CTRL, val);
}
static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
@@ -3133,13 +3142,16 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
}
if (!rtw89_pci_is_dac_compatible_bridge(rtwdev))
- goto no_dac;
+ goto try_dac_done;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
if (!ret) {
- rtwpci->enable_dac = true;
- rtw89_pci_cfg_dac(rtwdev);
- } else {
+ ret = rtw89_pci_cfg_dac(rtwdev, true);
+ if (!ret) {
+ rtwpci->enable_dac = true;
+ goto try_dac_done;
+ }
+
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
rtw89_err(rtwdev,
@@ -3147,7 +3159,7 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
goto err_release_regions;
}
}
-no_dac:
+try_dac_done:
resource_len = pci_resource_len(pdev, bar_id);
rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
@@ -4302,7 +4314,7 @@ static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
{
if (resume)
- rtw89_pci_cfg_dac(rtwdev);
+ rtw89_pci_cfg_dac(rtwdev, false);
rtw89_pci_disable_eq(rtwdev);
rtw89_pci_filter_out(rtwdev);
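
rtw89_pci_cfg_dac() now goes through the PCI config-space API and reports failure, and setup_mapping() falls back from a 36-bit to a 32-bit DMA mask when DAC cannot be enabled. A hedged sketch of that fallback, using real kernel APIs but a made-up register offset and bit (the DEMO_* values are placeholders, not the rtw89 definitions):

#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>

#define DEMO_L1_CTRL	0x70	/* placeholder config-space offset */
#define DEMO_EN_64BITS	BIT(5)	/* placeholder DAC-enable bit */

static int demo_cfg_dac(struct pci_dev *pdev)
{
	u8 val;
	int ret;

	/* read-modify-write via config space, valid even when DBI is not */
	ret = pci_read_config_byte(pdev, DEMO_L1_CTRL, &val);
	if (ret)
		return ret;

	return pci_write_config_byte(pdev, DEMO_L1_CTRL, val | DEMO_EN_64BITS);
}

static int demo_setup_dma(struct pci_dev *pdev, bool *dac)
{
	*dac = false;

	/* prefer 36-bit addressing, but only if DAC config also succeeds */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)) &&
	    !demo_cfg_dac(pdev)) {
		*dac = true;
		return 0;
	}

	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}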
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index f4eee642e5ce..76a2e26d4a10 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2034,19 +2034,10 @@ static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
ant_gain->offset[path][subband_h]);
}
-static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq)
+static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u32 center_freq)
{
- struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
- const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 regd = rtw89_regd_get(rtwdev, band);
s8 offset_patha, offset_pathb;
- if (!chip->support_ant_gain)
- return 0;
-
- if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
- return 0;
-
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
@@ -2056,18 +2047,31 @@ static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 cente
return max(offset_patha, offset_pathb);
}
-s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan)
+static bool rtw89_can_apply_ant_gain(struct rtw89_dev *rtwdev, u8 band)
{
+ const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
- s8 offset_patha, offset_pathb;
+ u8 regd = rtw89_regd_get(rtwdev, band);
if (!chip->support_ant_gain)
- return 0;
+ return false;
if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
+ return false;
+
+ if (!rfe_parms->has_da)
+ return false;
+
+ return true;
+}
+
+s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ s8 offset_patha, offset_pathb;
+
+ if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type))
return 0;
if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
@@ -2083,14 +2087,10 @@ EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
const struct rtw89_chan *chan)
{
- struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
- const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
char *p = buf, *end = buf + bufsz;
s8 offset_patha, offset_pathb;
- if (!(chip->support_ant_gain && (ant_gain->regd_enabled & BIT(regd))) ||
- ant_gain->block_country) {
+ if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type)) {
p += scnprintf(p, end - p, "no DAG is applied\n");
goto out;
}
@@ -2255,20 +2255,31 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
+ const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
+ const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
+ s8 lmt = 0, da_lmt = S8_MAX, sar, offset = 0;
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt = 0, sar, offset;
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = freq,
+ .ntx = ntx,
+ };
s8 cstr;
switch (band) {
case RTW89_BAND_2G:
+ if (has_ant_gain)
+ da_lmt = (*rule_da_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
+
lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
if (lmt)
break;
@@ -2276,6 +2287,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
break;
case RTW89_BAND_5G:
+ if (has_ant_gain)
+ da_lmt = (*rule_da_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
+
lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
if (lmt)
break;
@@ -2283,6 +2297,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
break;
case RTW89_BAND_6G:
+ if (has_ant_gain)
+ da_lmt = (*rule_da_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
+
lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
if (lmt)
break;
@@ -2296,9 +2313,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
- lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset);
- sar = rtw89_query_sar(rtwdev, freq);
+ da_lmt = da_lmt ?: S8_MAX;
+ if (da_lmt != S8_MAX)
+ offset = rtw89_phy_ant_gain_offset(rtwdev, freq);
+
+ lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt + offset, da_lmt));
+ sar = rtw89_query_sar(rtwdev, &sar_parm);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
return min3(lmt, sar, cstr);
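
A toy worked example of the clamping order above, with invented values in a single common unit (the real code also converts RF to MAC units before the final min3): the regulatory limit plus antenna-gain offset is first capped by the dynamic-antenna (DA) limit, and the result then competes with SAR and the TPE constraint.

#include <stdio.h>

static int min2(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int lmt = 40, offset = 4;	/* base limit + antenna gain */
	int da_lmt = 42;		/* DA table limit, when applicable */
	int sar = 44, cstr = 48;	/* SAR and TPE constraints */
	int capped = min2(lmt + offset, da_lmt);

	printf("final = %d\n", min2(min2(capped, sar), cstr));	/* 42 */
	return 0;
}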
@@ -2515,20 +2535,31 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch)
{
const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
+ const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
+ const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
+ s8 lmt_ru = 0, da_lmt_ru = S8_MAX, sar, offset = 0;
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt_ru = 0, sar, offset;
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = freq,
+ .ntx = ntx,
+ };
s8 cstr;
switch (band) {
case RTW89_BAND_2G:
+ if (has_ant_gain)
+ da_lmt_ru = (*rule_da_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
+
lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
if (lmt_ru)
break;
@@ -2536,6 +2567,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
break;
case RTW89_BAND_5G:
+ if (has_ant_gain)
+ da_lmt_ru = (*rule_da_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
+
lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
if (lmt_ru)
break;
@@ -2543,6 +2577,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
break;
case RTW89_BAND_6G:
+ if (has_ant_gain)
+ da_lmt_ru = (*rule_da_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
+
lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
if (lmt_ru)
break;
@@ -2556,9 +2593,12 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
- lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset);
- sar = rtw89_query_sar(rtwdev, freq);
+ da_lmt_ru = da_lmt_ru ?: S8_MAX;
+ if (da_lmt_ru != S8_MAX)
+ offset = rtw89_phy_ant_gain_offset(rtwdev, freq);
+
+ lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt_ru + offset, da_lmt_ru));
+ sar = rtw89_query_sar(rtwdev, &sar_parm);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
return min3(lmt_ru, sar, cstr);
@@ -3015,6 +3055,35 @@ void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};
+static void
+rtw89_phy_c2h_lowrt_rty(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
+static void
+rtw89_phy_c2h_fw_scan_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ const struct rtw89_c2h_fw_scan_rpt *c2h_rpt =
+ (const struct rtw89_c2h_fw_scan_rpt *)c2h->data;
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "%s: band: %u, op_chan: %u, PD_low_bd(ofdm, cck): (-%d, %d), phy_idx: %u\n",
+ __func__, c2h_rpt->band, c2h_rpt->center_ch,
+ PD_LOWER_BOUND_BASE - (c2h_rpt->ofdm_pd_idx << 1),
+ c2h_rpt->cck_pd_idx, c2h_rpt->phy_idx);
+}
+
+static
+void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_PHY_C2H_DM_FUNC_FW_TEST] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_FW_TRIG_TX_RPT] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_SIGB] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY] = rtw89_phy_c2h_lowrt_rty,
+ [RTW89_PHY_C2H_DM_FUNC_MCC_DIG] = NULL,
+ [RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
+};
+
static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
enum rtw89_phy_c2h_rfk_log_func func,
void *content, u16 len)
@@ -3550,9 +3619,9 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler = rtw89_phy_c2h_rfk_report_handler[func];
break;
case RTW89_PHY_C2H_CLASS_DM:
- if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
- return;
- fallthrough;
+ if (func < ARRAY_SIZE(rtw89_phy_c2h_dm_handler))
+ handler = rtw89_phy_c2h_dm_handler[func];
+ break;
default:
rtw89_info(rtwdev, "PHY c2h class %d not supported\n", class);
return;
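
The DM class now dispatches through rtw89_phy_c2h_dm_handler[] instead of special-casing LOWRT_RTY. A generic sketch of that table-dispatch pattern (names are illustrative):

#include <stddef.h>

typedef void (*demo_handler)(void *dev, const void *data, unsigned int len);

static void demo_dispatch(void *dev, unsigned int func,
			  const void *data, unsigned int len,
			  demo_handler const *table, unsigned int n_entries)
{
	demo_handler h = func < n_entries ? table[func] : NULL;

	/* NULL slots (and out-of-range funcs) are silently ignored */
	if (h)
		h(dev, data, len);
}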
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 518a100375fb..5b451f1cfaac 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -164,6 +164,7 @@ enum rtw89_phy_c2h_dm_func {
RTW89_PHY_C2H_DM_FUNC_SIGB,
RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY,
RTW89_PHY_C2H_DM_FUNC_MCC_DIG,
+ RTW89_PHY_C2H_DM_FUNC_FW_SCAN = 0xc,
RTW89_PHY_C2H_DM_FUNC_NUM,
};
@@ -935,6 +936,20 @@ static inline s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
}
+static inline s16 rtw89_phy_txpwr_mac_to_rf(struct rtw89_dev *rtwdev, s8 txpwr_mac)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_mac << (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
+}
+
+static inline s16 rtw89_phy_txpwr_mac_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_mac)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_mac << (chip->txpwr_factor_bb - chip->txpwr_factor_mac);
+}
+
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
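
The new mac_to_rf/mac_to_bb helpers above convert between fixed-point txpwr units by shifting by the factor difference. A small numeric check under assumed factors (MAC in 1/2 dBm, RF in 1/8 dBm; the real factors are per-chip):

#include <assert.h>

int main(void)
{
	const int factor_mac = 1, factor_rf = 3;	/* assumed factors */
	int txpwr_mac = 10;			/* 5 dBm in 1/2 dBm steps */
	int txpwr_rf = txpwr_mac << (factor_rf - factor_mac);

	assert(txpwr_rf == 40);			/* still 5 dBm, in 1/8 dBm steps */
	return 0;
}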
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index 37d8f254ae32..d321cf1fc485 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -362,7 +362,7 @@ static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev,
rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0);
rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0);
addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx);
- rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ON, 0);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ALL, 0);
addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx);
rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0);
addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index ac46a7baa00d..8e4fe73e7d77 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -382,3 +382,150 @@ u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data)
tail = ie->noa_desc + setter->noa_count;
return tail - *data;
}
+
+static void rtw89_ps_noa_once_set_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_ps_noa_once_handler *noa_once =
+ container_of(work, struct rtw89_ps_noa_once_handler, set_work.work);
+
+ lockdep_assert_wiphy(wiphy);
+
+ noa_once->in_duration = true;
+}
+
+static void rtw89_ps_noa_once_clr_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_ps_noa_once_handler *noa_once =
+ container_of(work, struct rtw89_ps_noa_once_handler, clr_work.work);
+ struct rtw89_vif_link *rtwvif_link =
+ container_of(noa_once, struct rtw89_vif_link, noa_once);
+ struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev;
+
+ lockdep_assert_wiphy(wiphy);
+
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ noa_once->in_duration = false;
+}
+
+void rtw89_p2p_noa_once_init(struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_ps_noa_once_handler *noa_once = &rtwvif_link->noa_once;
+
+ noa_once->in_duration = false;
+ noa_once->tsf_begin = 0;
+ noa_once->tsf_end = 0;
+
+ wiphy_delayed_work_init(&noa_once->set_work, rtw89_ps_noa_once_set_work);
+ wiphy_delayed_work_init(&noa_once->clr_work, rtw89_ps_noa_once_clr_work);
+}
+
+static void rtw89_p2p_noa_once_cancel(struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_ps_noa_once_handler *noa_once = &rtwvif_link->noa_once;
+ struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev;
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ wiphy_delayed_work_cancel(wiphy, &noa_once->set_work);
+ wiphy_delayed_work_cancel(wiphy, &noa_once->clr_work);
+}
+
+void rtw89_p2p_noa_once_deinit(struct rtw89_vif_link *rtwvif_link)
+{
+ rtw89_p2p_noa_once_cancel(rtwvif_link);
+ rtw89_p2p_noa_once_init(rtwvif_link);
+}
+
+void rtw89_p2p_noa_once_recalc(struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_ps_noa_once_handler *noa_once = &rtwvif_link->noa_once;
+ struct rtw89_dev *rtwdev = rtwvif_link->rtwvif->rtwdev;
+ const struct ieee80211_p2p_noa_desc *noa_desc;
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+ struct ieee80211_bss_conf *bss_conf;
+ u64 tsf_begin = U64_MAX, tsf_end;
+ u64 set_delay_us = 0;
+ u64 clr_delay_us = 0;
+ u32 start_time;
+ u32 interval;
+ u32 duration;
+ u64 tsf;
+ int ret;
+ int i;
+
+ lockdep_assert_wiphy(wiphy);
+
+ ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
+ if (ret) {
+ rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__);
+ return;
+ }
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+
+ for (i = 0; i < ARRAY_SIZE(bss_conf->p2p_noa_attr.desc); i++) {
+ bool first = tsf_begin == U64_MAX;
+ u64 tmp;
+
+ noa_desc = &bss_conf->p2p_noa_attr.desc[i];
+ if (noa_desc->count == 0 || noa_desc->count == 255)
+ continue;
+
+ start_time = le32_to_cpu(noa_desc->start_time);
+ interval = le32_to_cpu(noa_desc->interval);
+ duration = le32_to_cpu(noa_desc->duration);
+
+ if (unlikely(duration == 0 ||
+ (noa_desc->count > 1 && interval == 0)))
+ continue;
+
+ tmp = start_time + interval * (noa_desc->count - 1) + duration;
+ tmp = (tsf & GENMASK_ULL(63, 32)) + tmp;
+ if (unlikely(tmp <= tsf))
+ continue;
+ tsf_end = first ? tmp : max(tsf_end, tmp);
+
+ tmp = (tsf & GENMASK_ULL(63, 32)) | start_time;
+ tsf_begin = first ? tmp : min(tsf_begin, tmp);
+ }
+
+ rcu_read_unlock();
+
+ if (tsf_begin == U64_MAX)
+ return;
+
+ rtw89_p2p_noa_once_cancel(rtwvif_link);
+
+ if (noa_once->tsf_end > tsf) {
+ tsf_begin = min(tsf_begin, noa_once->tsf_begin);
+ tsf_end = max(tsf_end, noa_once->tsf_end);
+ }
+
+ clr_delay_us = min_t(u64, tsf_end - tsf, UINT_MAX);
+
+ if (tsf_begin <= tsf) {
+ noa_once->in_duration = true;
+ goto out;
+ }
+
+ set_delay_us = tsf_begin - tsf;
+ if (unlikely(set_delay_us > UINT_MAX)) {
+ rtw89_warn(rtwdev, "%s: unhandled begin\n", __func__);
+ set_delay_us = 0;
+ clr_delay_us = 0;
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ noa_once->in_duration = false;
+ }
+
+out:
+ if (set_delay_us)
+ wiphy_delayed_work_queue(wiphy, &noa_once->set_work,
+ usecs_to_jiffies(set_delay_us));
+ if (clr_delay_us)
+ wiphy_delayed_work_queue(wiphy, &noa_once->clr_work,
+ usecs_to_jiffies(clr_delay_us));
+
+ noa_once->tsf_begin = tsf_begin;
+ noa_once->tsf_end = tsf_end;
+}
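
rtw89_p2p_noa_once_recalc() rebuilds 64-bit absolute times from the 32-bit NoA fields by borrowing the upper half of the current TSF. A standalone sketch of those two computations (OR for the begin time, ADD for the end so the sum may carry into the upper bits):

#include <stdint.h>

#define TSF_HIGH(tsf)	((tsf) & 0xffffffff00000000ULL)

static uint64_t demo_noa_begin(uint64_t tsf, uint32_t start_time)
{
	return TSF_HIGH(tsf) | start_time;
}

static uint64_t demo_noa_end(uint64_t tsf, uint32_t start_time,
			     uint32_t interval, uint32_t duration,
			     uint8_t count)
{
	uint64_t span = (uint64_t)start_time +
			(uint64_t)interval * (count - 1) + duration;

	return TSF_HIGH(tsf) + span;	/* may carry past bit 31 */
}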
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index 2b88f254a32d..b2c43d44820d 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -22,6 +22,9 @@ void rtw89_p2p_noa_renew(struct rtw89_vif_link *rtwvif_link);
void rtw89_p2p_noa_append(struct rtw89_vif_link *rtwvif_link,
const struct ieee80211_p2p_noa_desc *desc);
u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data);
+void rtw89_p2p_noa_once_init(struct rtw89_vif_link *rtwvif_link);
+void rtw89_p2p_noa_once_deinit(struct rtw89_vif_link *rtwvif_link);
+void rtw89_p2p_noa_once_recalc(struct rtw89_vif_link *rtwvif_link);
static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index c776954ad360..f05c81ae5869 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -7601,7 +7601,15 @@
#define B_BE_PWR_FORCE_RU_ON BIT(18)
#define B_BE_PWR_FORCE_RU_ENON BIT(28)
#define R_BE_PWR_FORCE_MACID 0x11A48
-#define B_BE_PWR_FORCE_MACID_ON BIT(9)
+#define B_BE_PWR_FORCE_MACID_DBM_ON BIT(9)
+#define B_BE_PWR_FORCE_MACID_DBM_VAL GENMASK(17, 10)
+#define B_BE_PWR_FORCE_MACID_EN_VAL BIT(18)
+#define B_BE_PWR_FORCE_MACID_EN_ON BIT(19)
+#define B_BE_PWR_FORCE_MACID_ALL \
+ (B_BE_PWR_FORCE_MACID_DBM_ON | \
+ B_BE_PWR_FORCE_MACID_DBM_VAL | \
+ B_BE_PWR_FORCE_MACID_EN_VAL | \
+ B_BE_PWR_FORCE_MACID_EN_ON)
#define R_BE_PWR_REG_CTRL 0x11A50
#define B_BE_PWR_BT_EN BIT(23)
@@ -8737,8 +8745,10 @@
#define B_DPD_GDIS BIT(13)
#define B_IQK_RFC_ON BIT(1)
#define R_TXPWRB 0x56CC
+#define R_P1_TXPWRB 0x76CC
#define B_TXPWRB_ON BIT(28)
#define B_TXPWRB_VAL GENMASK(27, 19)
+#define B_TXPWRB_MAX GENMASK(8, 0)
#define R_DPD_OFT_EN 0x5800
#define B_DPD_OFT_EN BIT(28)
#define B_DPD_TSSI_CW GENMASK(26, 18)
@@ -9360,6 +9370,9 @@
#define R_TSSI_PWR_P0 0xE610
#define R_TSSI_PWR_P1 0xE710
#define B_TSSI_CONT_EN BIT(3)
+#define R_P0_TXPWRB_BE 0xE61C
+#define R_P1_TXPWRB_BE 0xE71C
+#define B_TXPWRB_MAX_BE GENMASK(20, 12)
#define R_TSSI_MAP_OFST_P0 0xE620
#define R_TSSI_MAP_OFST_P1 0xE720
#define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 655323a79608..3ad14cab1f58 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -588,6 +588,38 @@ bottom:
kfree(sband);
}
+#define RTW89_DEF_REGD_STR(regd) \
+ [RTW89_ ## regd] = #regd
+
+static const char * const rtw89_regd_string[] = {
+ RTW89_DEF_REGD_STR(WW),
+ RTW89_DEF_REGD_STR(ETSI),
+ RTW89_DEF_REGD_STR(FCC),
+ RTW89_DEF_REGD_STR(MKK),
+ RTW89_DEF_REGD_STR(NA),
+ RTW89_DEF_REGD_STR(IC),
+ RTW89_DEF_REGD_STR(KCC),
+ RTW89_DEF_REGD_STR(ACMA),
+ RTW89_DEF_REGD_STR(NCC),
+ RTW89_DEF_REGD_STR(MEXICO),
+ RTW89_DEF_REGD_STR(CHILE),
+ RTW89_DEF_REGD_STR(UKRAINE),
+ RTW89_DEF_REGD_STR(CN),
+ RTW89_DEF_REGD_STR(QATAR),
+ RTW89_DEF_REGD_STR(UK),
+ RTW89_DEF_REGD_STR(THAILAND),
+};
+
+static_assert(ARRAY_SIZE(rtw89_regd_string) == RTW89_REGD_NUM);
+
+const char *rtw89_regd_get_string(enum rtw89_regulation_type regd)
+{
+ if (regd < 0 || regd >= RTW89_REGD_NUM)
+ return "(unknown)";
+
+ return rtw89_regd_string[regd];
+}
+
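
The RTW89_DEF_REGD_STR table above pairs a stringify macro with a static_assert so a new enum entry without a string fails to build. The same pattern in miniature (the demo enum and names are illustrative):

enum demo_regd { DEMO_WW, DEMO_ETSI, DEMO_FCC, DEMO_REGD_NUM };

#define DEMO_REGD_STR(r) [DEMO_ ## r] = #r

static const char * const demo_regd_string[] = {
	DEMO_REGD_STR(WW),
	DEMO_REGD_STR(ETSI),
	DEMO_REGD_STR(FCC),
};

/* adding an enumerator without a string entry breaks the build here */
_Static_assert(sizeof(demo_regd_string) / sizeof(demo_regd_string[0]) ==
	       DEMO_REGD_NUM, "regd string table out of sync");

static const char *demo_regd_get_string(enum demo_regd regd)
{
	if ((unsigned int)regd >= DEMO_REGD_NUM)
		return "(unknown)";
	return demo_regd_string[regd];
}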
int rtw89_regd_setup(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
@@ -604,6 +636,7 @@ int rtw89_regd_setup(struct rtw89_dev *rtwdev)
}
regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+ regulatory->txpwr_uk_follow_etsi = true;
if (!wiphy)
return -EINVAL;
@@ -726,11 +759,22 @@ static void rtw89_regd_apply_policy_tas(struct rtw89_dev *rtwdev)
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_regd *regd = regulatory->regd;
struct rtw89_tas_info *tas = &rtwdev->tas;
+ u8 tas_country;
if (!tas->enable)
return;
- tas->block_regd = !test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap);
+ if (memcmp("US", regd->alpha2, 2) == 0)
+ tas_country = RTW89_ACPI_CONF_TAS_US;
+ else if (memcmp("CA", regd->alpha2, 2) == 0)
+ tas_country = RTW89_ACPI_CONF_TAS_CA;
+ else if (memcmp("KR", regd->alpha2, 2) == 0)
+ tas_country = RTW89_ACPI_CONF_TAS_KR;
+ else
+ tas_country = RTW89_ACPI_CONF_TAS_OTHERS;
+
+ tas->block_regd = !(tas->enabled_countries & tas_country &&
+ test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap));
}
static void rtw89_regd_apply_policy_ant_gain(struct rtw89_dev *rtwdev)
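
rtw89_regd_apply_policy_tas() now gates TAS on both the regd function bitmap and an ACPI-provided country allowlist. A sketch of that bitmask gating, with invented flag values (the real RTW89_ACPI_CONF_TAS_* encodings are not shown in this hunk):

#include <stdbool.h>
#include <string.h>

#define DEMO_TAS_US	0x1	/* invented flag values */
#define DEMO_TAS_CA	0x2
#define DEMO_TAS_KR	0x4
#define DEMO_TAS_OTHERS	0x8

static bool demo_tas_allowed(const char alpha2[2], unsigned int enabled,
			     bool regd_supports_tas)
{
	unsigned int country;

	if (!memcmp(alpha2, "US", 2))
		country = DEMO_TAS_US;
	else if (!memcmp(alpha2, "CA", 2))
		country = DEMO_TAS_CA;
	else if (!memcmp(alpha2, "KR", 2))
		country = DEMO_TAS_KR;
	else
		country = DEMO_TAS_OTHERS;

	/* TAS runs only if the country is enabled AND the regd allows it */
	return (enabled & country) && regd_supports_tas;
}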
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 0d482cd57f6e..fafa200a9c8d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2499,12 +2499,14 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_unii4 = true,
.support_ant_gain = false,
.support_tas = false,
+ .support_sar_by_ant = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.hw_tkip_crypto = false,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 1,
.tx_nss = 1,
.rx_nss = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 286334e26c84..cd5987fc52d7 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2217,12 +2217,14 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_unii4 = false,
.support_ant_gain = false,
.support_tas = false,
+ .support_sar_by_ant = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.hw_tkip_crypto = false,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index eceb4fb9880d..dacdb384de2c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -853,12 +853,14 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = false,
+ .support_sar_by_ant = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.hw_tkip_crypto = false,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index 99c9505b3cbd..0cf03f18cbb1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -8,6 +8,7 @@
#include "phy.h"
#include "reg.h"
#include "rtw8852b_common.h"
+#include "sar.h"
#include "util.h"
static const struct rtw89_reg3_def rtw8852bx_pmac_ht20_mcs7_tbl[] = {
@@ -1234,6 +1235,7 @@ static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
u32_encode_bits(ref, B_DPD_REF);
}
+/* @pwr_ofst (unit: 1/8 dBm): power of path A minus power of path B */
static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx, s16 pwr_ofst)
{
@@ -1336,6 +1338,27 @@ static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev,
tx_shape_ofdm);
}
+static s16 rtw8852bx_get_txpwr_sar_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = chan->freq,
+ .force_path = true,
+ };
+ s16 sar_bb_a, sar_bb_b;
+ s8 sar_mac;
+
+ sar_parm.path = RF_PATH_A;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_bb_a = rtw89_phy_txpwr_mac_to_bb(rtwdev, sar_mac);
+
+ sar_parm.path = RF_PATH_B;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_bb_b = rtw89_phy_txpwr_mac_to_bb(rtwdev, sar_mac);
+
+ return sar_bb_a - sar_bb_b;
+}
+
static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -1343,6 +1366,7 @@ static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev,
s16 pwr_ofst;
pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ pwr_ofst += rtw8852bx_get_txpwr_sar_diff(rtwdev, chan);
rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
}
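
rtw8852bx_get_txpwr_sar_diff() folds the per-path SAR difference into the path A/B power offset, in the BB unit of 1/8 dBm. A toy arithmetic check with invented values (the MAC-to-BB shift of 2 is an assumption, matching 1/2 dBm -> 1/8 dBm):

#include <stdio.h>

int main(void)
{
	const int mac_to_bb_shift = 2;		/* assumed 1/2 dBm -> 1/8 dBm */
	int sar_mac_a = 24, sar_mac_b = 20;	/* 12 dBm and 10 dBm */
	int diff_bb = (sar_mac_a << mac_to_bb_shift) -
		      (sar_mac_b << mac_to_bb_shift);

	/* 16 eighth-dBm: path A is allowed 2 dBm more than path B */
	printf("pwr_ofst += %d\n", diff_bb);
	return 0;
}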
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index bbf37442c492..289dce688d72 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -786,12 +786,14 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = false,
+ .support_sar_by_ant = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.hw_tkip_crypto = true,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 08bcdf246382..2a6143a8d256 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -15,7 +15,7 @@
#include "sar.h"
#include "util.h"
-#define RTW8852C_FW_FORMAT_MAX 1
+#define RTW8852C_FW_FORMAT_MAX 2
#define RTW8852C_FW_BASENAME "rtw89/rtw8852c_fw"
#define RTW8852C_MODULE_FIRMWARE \
RTW8852C_FW_BASENAME "-" __stringify(RTW8852C_FW_FORMAT_MAX) ".bin"
@@ -2079,6 +2079,31 @@ static void rtw8852c_set_txpwr_diff(struct rtw89_dev *rtwdev,
rtw8852c_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
}
+static void rtw8852c_set_txpwr_sar_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = chan->freq,
+ .force_path = true,
+ };
+ s16 sar_rf;
+ s8 sar_mac;
+
+ if (phy_idx != RTW89_PHY_0)
+ return;
+
+ sar_parm.path = RF_PATH_A;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB, B_TXPWRB_MAX, sar_rf);
+
+ sar_parm.path = RF_PATH_B;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPWRB, B_TXPWRB_MAX, sar_rf);
+}
+
static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -2089,6 +2114,7 @@ static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
rtw8852c_set_txpwr_diff(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_sar_diff(rtwdev, chan, phy_idx);
}
static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
@@ -3014,12 +3040,14 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = true,
+ .support_sar_by_ant = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
.hw_tkip_crypto = true,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 8082592db84a..1d0f6e7df497 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -12,6 +12,7 @@
#include "reg.h"
#include "rtw8922a.h"
#include "rtw8922a_rfk.h"
+#include "sar.h"
#include "util.h"
#define RTW8922A_FW_FORMAT_MAX 3
@@ -2070,7 +2071,8 @@ static void __rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev,
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
rtw89_phy_rfk_dack_and_wait(rtwdev, phy_idx, chan, 58);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, phy_idx, chan, false, 32);
+ if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, phy_idx, chan, false, 128);
}
static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
@@ -2233,6 +2235,31 @@ static void rtw8922a_set_tx_shape(struct rtw89_dev *rtwdev,
rtw8922a_bb_tx_triangular(rtwdev, true, phy_idx);
}
+static void rtw8922a_set_txpwr_sar_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_sar_parm sar_parm = {
+ .center_freq = chan->freq,
+ .force_path = true,
+ };
+ s16 sar_rf;
+ s8 sar_mac;
+
+ if (phy_idx != RTW89_PHY_0)
+ return;
+
+ sar_parm.path = RF_PATH_A;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPWRB_BE, B_TXPWRB_MAX_BE, sar_rf);
+
+ sar_parm.path = RF_PATH_B;
+ sar_mac = rtw89_query_sar(rtwdev, &sar_parm);
+ sar_rf = rtw89_phy_txpwr_mac_to_rf(rtwdev, sar_mac);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPWRB_BE, B_TXPWRB_MAX_BE, sar_rf);
+}
+
static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -2244,6 +2271,7 @@ static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
rtw8922a_set_txpwr_diff(rtwdev, chan, phy_idx);
rtw8922a_set_txpwr_ref(rtwdev, phy_idx);
+ rtw8922a_set_txpwr_sar_diff(rtwdev, chan, phy_idx);
}
static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
@@ -2823,12 +2851,14 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = false,
+ .support_sar_by_ant = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
.hw_tkip_crypto = true,
+ .hw_mlo_bmc_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
index c4c93f836a2f..1659ea64ade1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -77,11 +77,6 @@ void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
RR_CFGCH_BAND0 | RR_CFGCH_CH);
rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);
- if (band == RTW89_BAND_2G)
- rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0);
- else
- rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1);
-
switch (band) {
case RTW89_BAND_2G:
default:
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 0b5af9528702..517b66022f18 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -57,10 +57,12 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
}
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev,
- u32 center_freq, s32 *cfg)
+ const struct rtw89_sar_parm *sar_parm,
+ s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
enum rtw89_sar_subband subband_l, subband_h;
+ u32 center_freq = sar_parm->center_freq;
const struct rtw89_6ghz_span *span;
span = rtw89_get_6ghz_span(rtwdev, center_freq);
@@ -90,6 +92,93 @@ static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev,
return 0;
}
+static const struct rtw89_sar_entry_from_acpi *
+rtw89_sar_cfg_acpi_get_ent(const struct rtw89_sar_cfg_acpi *rtwsar,
+ enum rtw89_rf_path path,
+ enum rtw89_regulation_type regd)
+{
+ const struct rtw89_sar_indicator_from_acpi *ind = &rtwsar->indicator;
+ const struct rtw89_sar_table_from_acpi *tbl;
+ u8 sel;
+
+ sel = ind->tblsel[path];
+ tbl = &rtwsar->tables[sel];
+
+ return &tbl->entries[regd];
+}
+
+static
+s32 rtw89_sar_cfg_acpi_get_min(const struct rtw89_sar_entry_from_acpi *ent,
+ enum rtw89_rf_path path,
+ enum rtw89_acpi_sar_subband subband_low,
+ enum rtw89_acpi_sar_subband subband_high)
+{
+ return min(ent->v[subband_low][path], ent->v[subband_high][path]);
+}
+
+static int rtw89_query_sar_config_acpi(struct rtw89_dev *rtwdev,
+ const struct rtw89_sar_parm *sar_parm,
+ s32 *cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_sar_cfg_acpi *rtwsar = &rtwdev->sar.cfg_acpi;
+ const struct rtw89_sar_entry_from_acpi *ent_a, *ent_b;
+ enum rtw89_acpi_sar_subband subband_l, subband_h;
+ u32 center_freq = sar_parm->center_freq;
+ const struct rtw89_6ghz_span *span;
+ enum rtw89_regulation_type regd;
+ enum rtw89_band band;
+ s32 cfg_a, cfg_b;
+
+ span = rtw89_get_6ghz_span(rtwdev, center_freq);
+
+ if (span && RTW89_ACPI_SAR_SPAN_VALID(span)) {
+ subband_l = span->acpi_sar_subband_low;
+ subband_h = span->acpi_sar_subband_high;
+ } else {
+ subband_l = rtw89_acpi_sar_get_subband(rtwdev, center_freq);
+ subband_h = subband_l;
+ }
+
+ band = rtw89_acpi_sar_subband_to_band(rtwdev, subband_l);
+ regd = rtw89_regd_get(rtwdev, band);
+
+ ent_a = rtw89_sar_cfg_acpi_get_ent(rtwsar, RF_PATH_A, regd);
+ ent_b = rtw89_sar_cfg_acpi_get_ent(rtwsar, RF_PATH_B, regd);
+
+ cfg_a = rtw89_sar_cfg_acpi_get_min(ent_a, RF_PATH_A, subband_l, subband_h);
+ cfg_b = rtw89_sar_cfg_acpi_get_min(ent_b, RF_PATH_B, subband_l, subband_h);
+
+ if (chip->support_sar_by_ant) {
+ /* With support_sar_by_ant declared, relax the general SAR query
+ * to return the maximum between paths. The chip is then expected
+ * to have applied the corresponding per-path SAR settings itself.
+ * (To get SAR for a given path, the chip can query with
+ * force_path.)
+ */
+ if (sar_parm->force_path) {
+ switch (sar_parm->path) {
+ default:
+ case RF_PATH_A:
+ *cfg = cfg_a;
+ break;
+ case RF_PATH_B:
+ *cfg = cfg_b;
+ break;
+ }
+ } else {
+ *cfg = max(cfg_a, cfg_b);
+ }
+ } else {
+ *cfg = min(cfg_a, cfg_b);
+ }
+
+ if (sar_parm->ntx == RTW89_2TX)
+ *cfg -= rtwsar->downgrade_2tx;
+
+ return 0;
+}
+
static const
struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = {
[RTW89_SAR_SOURCE_COMMON] = {
@@ -97,6 +186,11 @@ struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = {
.txpwr_factor_sar = 2,
.query_sar_config = rtw89_query_sar_config_common,
},
+ [RTW89_SAR_SOURCE_ACPI] = {
+ .descr_sar_source = "RTW89_SAR_SOURCE_ACPI",
+ .txpwr_factor_sar = TXPWR_FACTOR_OF_RTW89_ACPI_SAR,
+ .query_sar_config = rtw89_query_sar_config_acpi,
+ },
};
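
The comment in rtw89_query_sar_config_acpi() describes three selection modes; restated as a tiny decision function (a sketch, not the driver's code): chips without per-antenna SAR take the conservative minimum of both paths, chips with it return the maximum for the generic query since each path is clamped separately, and a forced query returns the requested path.

#include <stdbool.h>

enum demo_path { DEMO_PATH_A, DEMO_PATH_B };

static int demo_select_sar(int cfg_a, int cfg_b, bool by_ant,
			   bool force_path, enum demo_path path)
{
	if (!by_ant)
		return cfg_a < cfg_b ? cfg_a : cfg_b;	/* conservative min */

	if (force_path)
		return path == DEMO_PATH_B ? cfg_b : cfg_a;

	return cfg_a > cfg_b ? cfg_a : cfg_b;		/* relaxed max */
}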
#define rtw89_sar_set_src(_dev, _src, _cfg_name, _cfg_data) \
@@ -175,7 +269,7 @@ static const char *rtw89_tas_state_str(enum rtw89_tas_state state)
}
}
-s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
+s8 rtw89_query_sar(struct rtw89_dev *rtwdev, const struct rtw89_sar_parm *sar_parm)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
@@ -191,7 +285,7 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
if (src == RTW89_SAR_SOURCE_NONE)
return RTW89_SAR_TXPWR_MAC_MAX;
- ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg);
+ ret = sar_hdl->query_sar_config(rtwdev, sar_parm, &cfg);
if (ret)
return RTW89_SAR_TXPWR_MAC_MAX;
@@ -215,9 +309,10 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg);
}
+EXPORT_SYMBOL(rtw89_query_sar);
int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
- u32 center_freq)
+ const struct rtw89_sar_parm *sar_parm)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
@@ -238,7 +333,7 @@ int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
p += scnprintf(p, end - p, "source: %d (%s)\n", src,
sar_hdl->descr_sar_source);
- ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg);
+ ret = sar_hdl->query_sar_config(rtwdev, sar_parm, &cfg);
if (ret) {
p += scnprintf(p, end - p, "config: return code: %d\n", ret);
p += scnprintf(p, end - p,
@@ -252,6 +347,8 @@ int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
p += scnprintf(p, end - p, "config: %d (unit: 1/%lu dBm)\n", cfg,
BIT(fct));
+ p += scnprintf(p, end - p, "support different configs by antenna: %s\n",
+ str_yes_no(rtwdev->chip->support_sar_by_ant));
out:
return p - buf;
}
@@ -286,16 +383,7 @@ out:
static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev,
const struct rtw89_sar_cfg_common *sar)
{
- enum rtw89_sar_sources src;
-
- lockdep_assert_wiphy(rtwdev->hw->wiphy);
-
- src = rtwdev->sar.src;
- if (src != RTW89_SAR_SOURCE_NONE && src != RTW89_SAR_SOURCE_COMMON) {
- rtw89_warn(rtwdev, "SAR source: %d is in use", src);
- return -EBUSY;
- }
-
+ /* let common SAR have the highest priority; always apply it */
rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar);
rtw89_core_set_chip_txpwr(rtwdev);
rtw89_tas_reset(rtwdev, false);
@@ -363,18 +451,92 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
return rtw89_apply_sar_common(rtwdev, &sar_common);
}
+static void rtw89_apply_sar_acpi(struct rtw89_dev *rtwdev,
+ const struct rtw89_sar_cfg_acpi *sar)
+{
+ const struct rtw89_sar_table_from_acpi *tbl;
+ const struct rtw89_sar_entry_from_acpi *ent;
+ enum rtw89_sar_sources src;
+ unsigned int i, j, k;
+
+ src = rtwdev->sar.src;
+ if (src != RTW89_SAR_SOURCE_NONE) {
+ rtw89_warn(rtwdev, "SAR source: %d is in use", src);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "SAR-ACPI downgrade 2TX: %u (unit: 1/%lu dBm)\n",
+ sar->downgrade_2tx, BIT(TXPWR_FACTOR_OF_RTW89_ACPI_SAR));
+
+ for (i = 0; i < sar->valid_num; i++) {
+ tbl = &sar->tables[i];
+
+ for (j = 0; j < RTW89_REGD_NUM; j++) {
+ ent = &tbl->entries[j];
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "SAR-ACPI-[%u] REGD-%s (unit: 1/%lu dBm)\n",
+ i, rtw89_regd_get_string(j),
+ BIT(TXPWR_FACTOR_OF_RTW89_ACPI_SAR));
+
+ for (k = 0; k < NUM_OF_RTW89_ACPI_SAR_SUBBAND; k++)
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "On subband %u, { %d, %d }\n", k,
+ ent->v[k][RF_PATH_A], ent->v[k][RF_PATH_B]);
+ }
+ }
+
+ rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_ACPI, cfg_acpi, sar);
+
+ /* SAR via ACPI is only configured during early initialization, so
+ * it does not seem necessary to reset txpwr-related things here.
+ */
+}
+
+static void rtw89_set_sar_from_acpi(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_sar_cfg_acpi *cfg;
+ int ret;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ ret = rtw89_acpi_evaluate_sar(rtwdev, cfg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "evaluating ACPI SAR returns %d\n", ret);
+ goto out;
+ }
+
+ if (unlikely(!cfg->valid_num)) {
+ rtw89_debug(rtwdev, RTW89_DBG_SAR, "no valid SAR table from ACPI\n");
+ goto out;
+ }
+
+ rtw89_apply_sar_acpi(rtwdev, cfg);
+
+out:
+ kfree(cfg);
+}
+
static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
+ struct rtw89_sar_parm sar_parm = {};
int ret;
if (src == RTW89_SAR_SOURCE_NONE)
return false;
- ret = sar_hdl->query_sar_config(rtwdev, chan->freq, cfg);
+ sar_parm.center_freq = chan->freq;
+ ret = sar_hdl->query_sar_config(rtwdev, &sar_parm, cfg);
if (ret)
return false;
@@ -383,18 +545,27 @@ static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg)
return true;
}
-static void rtw89_tas_state_update(struct rtw89_dev *rtwdev,
- enum rtw89_tas_state state)
+static bool __rtw89_tas_state_update(struct rtw89_dev *rtwdev,
+ enum rtw89_tas_state state)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
if (tas->state == state)
- return;
+ return false;
rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: switch state: %s -> %s\n",
rtw89_tas_state_str(tas->state), rtw89_tas_state_str(state));
tas->state = state;
+ return true;
+}
+
+static void rtw89_tas_state_update(struct rtw89_dev *rtwdev,
+ enum rtw89_tas_state state)
+{
+ if (!__rtw89_tas_state_update(rtwdev, state))
+ return;
+
rtw89_core_set_chip_txpwr(rtwdev);
}
@@ -489,7 +660,7 @@ static void rtw89_tas_history_update(struct rtw89_dev *rtwdev)
rtw89_linear_to_db_quarter(div_u64(txpwr, PERCENT)));
}
-static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev)
+static bool rtw89_tas_rolling_average(struct rtw89_dev *rtwdev)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
s32 dpr_on_threshold, dpr_off_threshold;
@@ -515,18 +686,18 @@ static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev)
else if (txpwr_avg < dpr_off_threshold)
state = RTW89_TAS_STATE_DPR_OFF;
else
- return;
+ return false;
- rtw89_tas_state_update(rtwdev, state);
+ return __rtw89_tas_state_update(rtwdev, state);
}
-void rtw89_tas_init(struct rtw89_dev *rtwdev)
+static void rtw89_tas_init(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tas_info *tas = &rtwdev->tas;
+ const struct rtw89_acpi_policy_tas *ptr;
struct rtw89_acpi_dsm_result res = {};
int ret;
- u8 val;
if (!chip->support_tas)
return;
@@ -538,8 +709,9 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev)
return;
}
- val = res.u.value;
- switch (val) {
+ ptr = res.u.policy_tas;
+
+ switch (ptr->enable) {
case 0:
tas->enable = false;
break;
@@ -552,8 +724,13 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev)
if (!tas->enable) {
rtw89_debug(rtwdev, RTW89_DBG_SAR, "TAS not enabled\n");
- return;
+ goto out;
}
+
+ tas->enabled_countries = ptr->enabled_countries;
+
+out:
+ kfree(ptr);
}
void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force)
@@ -598,29 +775,28 @@ void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force)
"tas: band: %u, freq: %u\n", chan->band_type, chan->freq);
}
-void rtw89_tas_track(struct rtw89_dev *rtwdev)
+static bool rtw89_tas_track(struct rtw89_dev *rtwdev)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
struct rtw89_hal *hal = &rtwdev->hal;
s32 cfg;
if (hal->disabled_dm_bitmap & BIT(RTW89_DM_TAS))
- return;
+ return false;
if (!rtw89_tas_is_active(rtwdev))
- return;
+ return false;
- if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd) {
- rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
- return;
- }
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd)
+ return __rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
if (tas->pause)
- return;
+ return false;
rtw89_tas_window_update(rtwdev);
rtw89_tas_history_update(rtwdev);
- rtw89_tas_rolling_average(rtwdev);
+
+ return rtw89_tas_rolling_average(rtwdev);
}
void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start)
@@ -667,3 +843,51 @@ void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
}
}
EXPORT_SYMBOL(rtw89_tas_chanctx_cb);
+
+void rtw89_sar_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_set_sar_from_acpi(rtwdev);
+ rtw89_tas_init(rtwdev);
+}
+
+static bool rtw89_sar_track_acpi(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_sar_cfg_acpi *cfg = &rtwdev->sar.cfg_acpi;
+ struct rtw89_sar_indicator_from_acpi *ind = &cfg->indicator;
+ const enum rtw89_sar_sources src = rtwdev->sar.src;
+ bool changed;
+ int ret;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ if (src != RTW89_SAR_SOURCE_ACPI)
+ return false;
+
+ if (!ind->enable_sync)
+ return false;
+
+ ret = rtw89_acpi_evaluate_dynamic_sar_indicator(rtwdev, cfg, &changed);
+ if (likely(!ret))
+ return changed;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "%s: failed to track indicator: %d; reset and disable\n",
+ __func__, ret);
+
+ memset(ind->tblsel, 0, sizeof(ind->tblsel));
+ ind->enable_sync = false;
+ return true;
+}
+
+void rtw89_sar_track(struct rtw89_dev *rtwdev)
+{
+ unsigned int changes = 0;
+
+ changes += rtw89_sar_track_acpi(rtwdev);
+ changes += rtw89_tas_track(rtwdev);
+
+ if (!changes)
+ return;
+
+ rtw89_core_set_chip_txpwr(rtwdev);
+}
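
rtw89_sar_track() above accumulates boolean "changed" results so the txpwr recalculation runs at most once per tracking tick, no matter how many trackers fired. The pattern in isolation (the stub trackers are illustrative):

#include <stdbool.h>

static bool demo_track_acpi(void) { return false; }	/* stub trackers */
static bool demo_track_tas(void)  { return true;  }

static void demo_set_chip_txpwr(void) { /* expensive recompute */ }

static void demo_sar_track(void)
{
	unsigned int changes = 0;

	changes += demo_track_acpi();
	changes += demo_track_tas();

	if (changes)
		demo_set_chip_txpwr();	/* once, not per tracker */
}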
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
index 0df1661db9a8..4b7f3d44f57b 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.h
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -10,25 +10,34 @@
#define RTW89_SAR_TXPWR_MAC_MAX 63
#define RTW89_SAR_TXPWR_MAC_MIN -64
+struct rtw89_sar_parm {
+ u32 center_freq;
+ enum rtw89_ntx ntx;
+
+ bool force_path;
+ enum rtw89_rf_path path;
+};
+
struct rtw89_sar_handler {
const char *descr_sar_source;
u8 txpwr_factor_sar;
- int (*query_sar_config)(struct rtw89_dev *rtwdev, u32 center_freq, s32 *cfg);
+ int (*query_sar_config)(struct rtw89_dev *rtwdev,
+ const struct rtw89_sar_parm *sar_parm, s32 *cfg);
};
extern const struct cfg80211_sar_capa rtw89_sar_capa;
-s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq);
+s8 rtw89_query_sar(struct rtw89_dev *rtwdev, const struct rtw89_sar_parm *sar_parm);
int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
- u32 center_freq);
+ const struct rtw89_sar_parm *sar_parm);
int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz);
int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
-void rtw89_tas_init(struct rtw89_dev *rtwdev);
void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force);
-void rtw89_tas_track(struct rtw89_dev *rtwdev);
void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start);
void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_state state);
+void rtw89_sar_init(struct rtw89_dev *rtwdev);
+void rtw89_sar_track(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 0740e303680c..811c91481441 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -309,6 +309,9 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif_link->port);
rtwvif_link->net_type = RTW89_NET_TYPE_NO_LINK;
rtwvif_link->trigger = false;
+ rtwvif_link->rand_tsf_done = false;
+
+ rtw89_p2p_noa_once_deinit(rtwvif_link);
}
}
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 70fe7cebc9d5..94f27a9ee9f7 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -712,6 +712,25 @@ static inline u8 rtw89_core_get_qsel(struct rtw89_dev *rtwdev, u8 tid)
}
}
+static inline u8
+rtw89_core_get_qsel_mgmt(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;
+
+ if (desc_info->hiq) {
+ if (rtwvif_link->mac_idx == RTW89_MAC_1)
+ return RTW89_TX_QSEL_B1_HI;
+ else
+ return RTW89_TX_QSEL_B0_HI;
+ }
+
+ if (rtwvif_link->mac_idx == RTW89_MAC_1)
+ return RTW89_TX_QSEL_B1_MGMT;
+ else
+ return RTW89_TX_QSEL_B0_MGMT;
+}
+
static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
{
switch (qsel) {
@@ -719,12 +738,24 @@ static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
fallthrough;
case RTW89_TX_QSEL_BE_0:
+ case RTW89_TX_QSEL_BE_1:
+ case RTW89_TX_QSEL_BE_2:
+ case RTW89_TX_QSEL_BE_3:
return RTW89_TXCH_ACH0;
case RTW89_TX_QSEL_BK_0:
+ case RTW89_TX_QSEL_BK_1:
+ case RTW89_TX_QSEL_BK_2:
+ case RTW89_TX_QSEL_BK_3:
return RTW89_TXCH_ACH1;
case RTW89_TX_QSEL_VI_0:
+ case RTW89_TX_QSEL_VI_1:
+ case RTW89_TX_QSEL_VI_2:
+ case RTW89_TX_QSEL_VI_3:
return RTW89_TXCH_ACH2;
case RTW89_TX_QSEL_VO_0:
+ case RTW89_TX_QSEL_VO_1:
+ case RTW89_TX_QSEL_VO_2:
+ case RTW89_TX_QSEL_VO_3:
return RTW89_TXCH_ACH3;
case RTW89_TX_QSEL_B0_MGMT:
return RTW89_TXCH_CH8;
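
The extended switch above funnels four queue-select encodings per access category into one AC DMA channel. Under an invented encoding where each AC owns a contiguous block of four qsel values, the same collapse is a division; the real RTW89_TX_QSEL_* values live in txrx.h and need the switch as written.

enum demo_ach { DEMO_ACH0, DEMO_ACH1, DEMO_ACH2, DEMO_ACH3 };

/* invented layout: qsel 0..3 -> ACH0, 4..7 -> ACH1, 8..11 -> ACH2,
 * 12..15 -> ACH3
 */
static enum demo_ach demo_qsel_to_ach(unsigned int qsel)
{
	return (enum demo_ach)((qsel & 0xf) / 4);
}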
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 17eee58503cb..34a0ab49bd7a 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -1086,8 +1086,7 @@ static int rtw89_wow_set_wakeups(struct rtw89_dev *rtwdev,
rtw89_wow_init_pno(rtwdev, wowlan->nd_config);
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- /* use the link on HW-0 to do wow flow */
- rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ rtwvif_link = rtw89_get_designated_link(rtwvif);
if (!rtwvif_link)
continue;
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index cf3e976471c6..f6add19d1da1 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -1229,6 +1229,11 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
/* MLD not supported here */
u32 bcn_int = data->link_data[0].beacon_int;
u64 delta = abs(tsf - now);
+ struct ieee80211_bss_conf *conf;
+
+ conf = link_conf_dereference_protected(vif, data->link_data[0].link_id);
+ if (conf && !conf->enable_beacon)
+ return;
/* adjust after beaconing with new timestamp at old TBTT */
if (tsf > now) {
@@ -2273,7 +2278,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
{
struct mac80211_hwsim_link_data *link_data = arg;
u32 link_id = link_data->link_id;
- struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_bss_conf *link_conf, *tx_bss_conf;
struct mac80211_hwsim_data *data =
container_of(link_data, struct mac80211_hwsim_data,
link_data[link_id]);
@@ -2292,10 +2297,11 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
vif->type != NL80211_IFTYPE_OCB)
return;
- if (vif->mbssid_tx_vif && vif->mbssid_tx_vif != vif)
+ tx_bss_conf = rcu_access_pointer(link_conf->tx_bss_conf);
+ if (tx_bss_conf && tx_bss_conf != link_conf)
return;
- if (vif->bss_conf.ema_ap) {
+ if (link_conf->ema_ap) {
struct ieee80211_ema_beacons *ema;
u8 i = 0;